file_name large_stringlengths 4 140 | prefix large_stringlengths 0 12.1k | suffix large_stringlengths 0 12k | middle large_stringlengths 0 7.51k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
parser.py | <=\"'#;"
_ATTRIB_SPECIALS = "]\"'"
class HsdParser:
"""Event based parser for the HSD format.
Arguments:
eventhandler: Object which should handle the HSD-events triggered
during parsing. When not specified, HsdEventPrinter() is used.
Examples:
>>> from io import StringIO
>>> dictbuilder = hsd.HsdDictBuilder()
>>> parser = hsd.HsdParser(eventhandler=dictbuilder)
>>> hsdfile = StringIO(\"\"\"
... Hamiltonian {
... Dftb {
... Scc = Yes
... Filling = Fermi {
... Temperature [Kelvin] = 100
... }
... }
... }
... \"\"\")
>>> parser.parse(hsdfile)
>>> dictbuilder.hsddict
{'Hamiltonian': {'Dftb': {'Scc': True, 'Filling': {'Fermi':
{'Temperature': 100, 'Temperature.attrib': 'Kelvin'}}}}}
"""
def __init__(self, eventhandler: Optional[HsdEventHandler] = None):
"""Initializes the parser.
Args:
eventhandler: Instance of the HsdEventHandler class or its children.
"""
if eventhandler is None:
self._eventhandler = HsdEventPrinter()
else:
self._eventhandler = eventhandler
self._fname = "" # name of file being processed
self._checkstr = _GENERAL_SPECIALS # special characters to look for
self._oldcheckstr = "" # buffer fo checkstr
self._opened_tags = [] # info about opened tags
self._buffer = [] # buffering plain text between lines
self._attrib = None # attribute for current tag
self._hsdattrib = {} # hsd-options for current tag
self._currline = 0 # nr. of current line in file
self._after_equal_sign = False # last tag was opened with equal sign
self._inside_attrib = False # parser inside attrib specification
self._inside_quote = False # parser inside quotation
self._has_child = True # Whether current node has a child already
self._has_text = False # whether current node contains text already
self._oldbefore = "" # buffer for tagname
def parse(self, fobj: Union[TextIO, str]):
"""Parses the provided file-like object.
The parser will process the data and trigger the corresponding events
in the eventhandler which was passed at initialization.
Args:
fobj: File like object or name of a file containing the data.
"""
isfilename = isinstance(fobj, str)
if isfilename:
fp = open(fobj, "r")
self._fname = fobj
else:
fp = fobj
for line in fp.readlines():
self._parse(line)
self._currline += 1
if isfilename:
fp.close()
# Check for errors
if self._opened_tags:
line0 = self._opened_tags[-1][1]
else:
line0 = 0
if self._inside_quote:
self._error(UNCLOSED_QUOTATION_ERROR, (line0, self._currline))
elif self._inside_attrib:
self._error(UNCLOSED_ATTRIB_ERROR, (line0, self._currline))
elif self._opened_tags:
self._error(UNCLOSED_TAG_ERROR, (line0, line0))
elif ("".join(self._buffer)).strip():
self._error(ORPHAN_TEXT_ERROR, (line0, self._currline))
def _parse(self, line):
"""Parses a given line."""
while True:
sign, before, after = _splitbycharset(line, self._checkstr)
# End of line
if not sign:
if self._inside_quote:
self._buffer.append(before)
elif self._after_equal_sign:
self._text("".join(self._buffer) + before.strip())
self._closetag()
self._after_equal_sign = False
elif not self._inside_attrib:
self._buffer.append(before)
elif before.strip():
self._error(SYNTAX_ERROR, (self._currline, self._currline))
break
# Special character is escaped
elif before.endswith("\\") and not before.endswith("\\\\"):
self._buffer.append(before + sign)
# Equal sign
elif sign == "=":
# Ignore if followed by "{" (DFTB+ compatibility)
if after.lstrip().startswith("{"): # _oldbefore may already contain the tagname, if the # tagname was followed by an attribute -> append
self._oldbefore += before
else:
self._hsdattrib[common.HSD_ATTRIB_EQUAL] = True
self._starttag(before, False)
self._after_equal_sign = True
# Opening tag by curly brace
elif sign == "{":
#self._has_child = True
self._hsdattrib[common.HSD_ATTRIB_EQUAL] = False
self._starttag(before, self._after_equal_sign)
self._buffer = []
self._after_equal_sign = False
# Closing tag by curly brace
elif sign == "}":
self._text("".join(self._buffer) + before)
self._buffer = []
# If 'test { a = 12 }' occurs, curly brace closes two tags
if self._after_equal_sign:
self._after_equal_sign = False
self._closetag()
self._closetag()
# Closing tag by semicolon
elif sign == ";" and self._after_equal_sign:
self._after_equal_sign = False
self._text(before)
self._closetag()
# Comment line
elif sign == "#":
self._buffer.append(before)
after = ""
# Opening attribute specification
elif sign == "[":
if "".join(self._buffer).strip():
self._error(SYNTAX_ERROR, (self._currline, self._currline))
self._oldbefore = before
self._buffer = []
self._inside_attrib = True
self._opened_tags.append(("[", self._currline, None, None, None))
self._checkstr = _ATTRIB_SPECIALS
# Closing attribute specification
elif sign == "]":
value = "".join(self._buffer) + before
self._attrib = value.strip()
self._inside_attrib = False
self._buffer = []
self._opened_tags.pop()
self._checkstr = _GENERAL_SPECIALS
# Quoting strings
elif sign in ("'", '"'):
if self._inside_quote:
self._checkstr = self._oldcheckstr
self._inside_quote = False
self._buffer.append(before + sign)
self._opened_tags.pop()
else:
self._oldcheckstr = self._checkstr
self._checkstr = sign
self._inside_quote = True
self._buffer.append(before + sign)
self._opened_tags.append(('"', self._currline, None, None, None))
# Interrupt
elif sign == "<" and not self._after_equal_sign:
txtinc = after.startswith("<<")
hsdinc = after.startswith("<+")
if txtinc:
self._text("".join(self._buffer) + before)
self._buffer = []
self._eventhandler.add_text(self._include_txt(after[2:]))
break
if hsdinc:
self._include_hsd(after[2:])
break
self._buffer.append(before + sign)
else:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
line = after
def | (self, text):
stripped = text.strip()
if stripped:
if self._has_child:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
self._eventhandler.add_text(stripped)
self._has_text = True
def _starttag(self, tagname, closeprev):
txt = "".join(self._buffer)
if txt:
self._text(txt)
if self._has_text:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
tagname_stripped = tagname.strip()
if self._oldbefore:
if tagname_stripped:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
else:
tagname_stripped = self._oldbefore.strip()
if len(tagname_stripped.split()) > 1:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
self._hsdattrib[common.HSD_ATTRIB_LINE] = self._currline
self._eventhandler.open_tag(tagname_stripped, self._attrib,
self._hsdattrib)
self._opened_tags.append(
(tagname_stripped, self._currline, closeprev, True, False))
self._has_child = False
self._buffer = []
self._oldbefore = ""
self._attrib = None
self._hsdattrib = {}
def | _text | identifier_name |
parser.py | <=\"'#;"
_ATTRIB_SPECIALS = "]\"'"
class HsdParser:
"""Event based parser for the HSD format.
Arguments:
eventhandler: Object which should handle the HSD-events triggered
during parsing. When not specified, HsdEventPrinter() is used.
Examples:
>>> from io import StringIO
>>> dictbuilder = hsd.HsdDictBuilder()
>>> parser = hsd.HsdParser(eventhandler=dictbuilder)
>>> hsdfile = StringIO(\"\"\"
... Hamiltonian {
... Dftb {
... Scc = Yes
... Filling = Fermi {
... Temperature [Kelvin] = 100
... }
... }
... }
... \"\"\")
>>> parser.parse(hsdfile)
>>> dictbuilder.hsddict
{'Hamiltonian': {'Dftb': {'Scc': True, 'Filling': {'Fermi':
{'Temperature': 100, 'Temperature.attrib': 'Kelvin'}}}}}
"""
def __init__(self, eventhandler: Optional[HsdEventHandler] = None):
"""Initializes the parser.
Args:
eventhandler: Instance of the HsdEventHandler class or its children.
"""
if eventhandler is None:
self._eventhandler = HsdEventPrinter()
else:
self._eventhandler = eventhandler
self._fname = "" # name of file being processed
self._checkstr = _GENERAL_SPECIALS # special characters to look for
self._oldcheckstr = "" # buffer fo checkstr
self._opened_tags = [] # info about opened tags
self._buffer = [] # buffering plain text between lines
self._attrib = None # attribute for current tag
self._hsdattrib = {} # hsd-options for current tag
self._currline = 0 # nr. of current line in file
self._after_equal_sign = False # last tag was opened with equal sign
self._inside_attrib = False # parser inside attrib specification
self._inside_quote = False # parser inside quotation
self._has_child = True # Whether current node has a child already
self._has_text = False # whether current node contains text already
self._oldbefore = "" # buffer for tagname
def parse(self, fobj: Union[TextIO, str]):
"""Parses the provided file-like object.
The parser will process the data and trigger the corresponding events
in the eventhandler which was passed at initialization.
Args:
fobj: File like object or name of a file containing the data.
"""
isfilename = isinstance(fobj, str)
if isfilename:
fp = open(fobj, "r")
self._fname = fobj
else:
fp = fobj
for line in fp.readlines():
self._parse(line)
self._currline += 1
if isfilename:
fp.close()
# Check for errors
if self._opened_tags:
line0 = self._opened_tags[-1][1]
else:
line0 = 0
if self._inside_quote:
self._error(UNCLOSED_QUOTATION_ERROR, (line0, self._currline))
elif self._inside_attrib:
self._error(UNCLOSED_ATTRIB_ERROR, (line0, self._currline))
elif self._opened_tags:
self._error(UNCLOSED_TAG_ERROR, (line0, line0))
elif ("".join(self._buffer)).strip():
self._error(ORPHAN_TEXT_ERROR, (line0, self._currline))
def _parse(self, line):
"""Parses a given line."""
while True:
sign, before, after = _splitbycharset(line, self._checkstr)
# End of line
if not sign:
if self._inside_quote:
self._buffer.append(before)
elif self._after_equal_sign:
self._text("".join(self._buffer) + before.strip())
self._closetag()
self._after_equal_sign = False
elif not self._inside_attrib:
self._buffer.append(before)
elif before.strip():
self._error(SYNTAX_ERROR, (self._currline, self._currline))
break
# Special character is escaped
elif before.endswith("\\") and not before.endswith("\\\\"):
self._buffer.append(before + sign)
# Equal sign
elif sign == "=":
# Ignore if followed by "{" (DFTB+ compatibility)
if after.lstrip().startswith("{"): # _oldbefore may already contain the tagname, if the # tagname was followed by an attribute -> append
self._oldbefore += before
else:
self._hsdattrib[common.HSD_ATTRIB_EQUAL] = True
self._starttag(before, False)
self._after_equal_sign = True
# Opening tag by curly brace
elif sign == "{":
#self._has_child = True
self._hsdattrib[common.HSD_ATTRIB_EQUAL] = False
self._starttag(before, self._after_equal_sign)
self._buffer = []
self._after_equal_sign = False
# Closing tag by curly brace
elif sign == "}":
self._text("".join(self._buffer) + before)
self._buffer = []
# If 'test { a = 12 }' occurs, curly brace closes two tags
if self._after_equal_sign:
self._after_equal_sign = False
self._closetag()
self._closetag()
# Closing tag by semicolon
elif sign == ";" and self._after_equal_sign:
self._after_equal_sign = False
self._text(before)
self._closetag()
# Comment line
elif sign == "#":
self._buffer.append(before)
after = ""
# Opening attribute specification
elif sign == "[":
if "".join(self._buffer).strip():
self._error(SYNTAX_ERROR, (self._currline, self._currline))
self._oldbefore = before
self._buffer = []
self._inside_attrib = True
self._opened_tags.append(("[", self._currline, None, None, None))
self._checkstr = _ATTRIB_SPECIALS
# Closing attribute specification
elif sign == "]":
value = "".join(self._buffer) + before
self._attrib = value.strip()
self._inside_attrib = False
self._buffer = []
self._opened_tags.pop()
self._checkstr = _GENERAL_SPECIALS
# Quoting strings
elif sign in ("'", '"'):
if self._inside_quote:
self._checkstr = self._oldcheckstr
self._inside_quote = False
self._buffer.append(before + sign)
self._opened_tags.pop()
else:
self._oldcheckstr = self._checkstr
self._checkstr = sign
self._inside_quote = True
self._buffer.append(before + sign)
self._opened_tags.append(('"', self._currline, None, None, None))
# Interrupt
elif sign == "<" and not self._after_equal_sign:
|
else:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
line = after
def _text(self, text):
stripped = text.strip()
if stripped:
if self._has_child:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
self._eventhandler.add_text(stripped)
self._has_text = True
def _starttag(self, tagname, closeprev):
txt = "".join(self._buffer)
if txt:
self._text(txt)
if self._has_text:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
tagname_stripped = tagname.strip()
if self._oldbefore:
if tagname_stripped:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
else:
tagname_stripped = self._oldbefore.strip()
if len(tagname_stripped.split()) > 1:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
self._hsdattrib[common.HSD_ATTRIB_LINE] = self._currline
self._eventhandler.open_tag(tagname_stripped, self._attrib,
self._hsdattrib)
self._opened_tags.append(
(tagname_stripped, self._currline, closeprev, True, False))
self._has_child = False
self._buffer = []
self._oldbefore = ""
self._attrib = None
self._hsdattrib = {}
| txtinc = after.startswith("<<")
hsdinc = after.startswith("<+")
if txtinc:
self._text("".join(self._buffer) + before)
self._buffer = []
self._eventhandler.add_text(self._include_txt(after[2:]))
break
if hsdinc:
self._include_hsd(after[2:])
break
self._buffer.append(before + sign) | conditional_block |
parser.py | b {
... Scc = Yes
... Filling = Fermi {
... Temperature [Kelvin] = 100
... }
... }
... }
... \"\"\")
>>> parser.parse(hsdfile)
>>> dictbuilder.hsddict
{'Hamiltonian': {'Dftb': {'Scc': True, 'Filling': {'Fermi':
{'Temperature': 100, 'Temperature.attrib': 'Kelvin'}}}}}
"""
def __init__(self, eventhandler: Optional[HsdEventHandler] = None):
"""Initializes the parser.
Args:
eventhandler: Instance of the HsdEventHandler class or its children.
"""
if eventhandler is None:
self._eventhandler = HsdEventPrinter()
else:
self._eventhandler = eventhandler
self._fname = "" # name of file being processed
self._checkstr = _GENERAL_SPECIALS # special characters to look for
self._oldcheckstr = "" # buffer fo checkstr
self._opened_tags = [] # info about opened tags
self._buffer = [] # buffering plain text between lines
self._attrib = None # attribute for current tag
self._hsdattrib = {} # hsd-options for current tag
self._currline = 0 # nr. of current line in file
self._after_equal_sign = False # last tag was opened with equal sign
self._inside_attrib = False # parser inside attrib specification
self._inside_quote = False # parser inside quotation
self._has_child = True # Whether current node has a child already
self._has_text = False # whether current node contains text already
self._oldbefore = "" # buffer for tagname
def parse(self, fobj: Union[TextIO, str]):
"""Parses the provided file-like object.
The parser will process the data and trigger the corresponding events
in the eventhandler which was passed at initialization.
Args:
fobj: File like object or name of a file containing the data.
"""
isfilename = isinstance(fobj, str)
if isfilename:
fp = open(fobj, "r")
self._fname = fobj
else:
fp = fobj
for line in fp.readlines():
self._parse(line)
self._currline += 1
if isfilename:
fp.close()
# Check for errors
if self._opened_tags:
line0 = self._opened_tags[-1][1]
else:
line0 = 0
if self._inside_quote:
self._error(UNCLOSED_QUOTATION_ERROR, (line0, self._currline))
elif self._inside_attrib:
self._error(UNCLOSED_ATTRIB_ERROR, (line0, self._currline))
elif self._opened_tags:
self._error(UNCLOSED_TAG_ERROR, (line0, line0))
elif ("".join(self._buffer)).strip():
self._error(ORPHAN_TEXT_ERROR, (line0, self._currline))
def _parse(self, line):
"""Parses a given line."""
while True:
sign, before, after = _splitbycharset(line, self._checkstr)
# End of line
if not sign:
if self._inside_quote:
self._buffer.append(before)
elif self._after_equal_sign:
self._text("".join(self._buffer) + before.strip())
self._closetag()
self._after_equal_sign = False
elif not self._inside_attrib:
self._buffer.append(before)
elif before.strip():
self._error(SYNTAX_ERROR, (self._currline, self._currline))
break
# Special character is escaped
elif before.endswith("\\") and not before.endswith("\\\\"):
self._buffer.append(before + sign)
# Equal sign
elif sign == "=":
# Ignore if followed by "{" (DFTB+ compatibility)
if after.lstrip().startswith("{"): # _oldbefore may already contain the tagname, if the # tagname was followed by an attribute -> append
self._oldbefore += before
else:
self._hsdattrib[common.HSD_ATTRIB_EQUAL] = True
self._starttag(before, False)
self._after_equal_sign = True
# Opening tag by curly brace
elif sign == "{":
#self._has_child = True
self._hsdattrib[common.HSD_ATTRIB_EQUAL] = False
self._starttag(before, self._after_equal_sign)
self._buffer = []
self._after_equal_sign = False
# Closing tag by curly brace
elif sign == "}":
self._text("".join(self._buffer) + before)
self._buffer = []
# If 'test { a = 12 }' occurs, curly brace closes two tags
if self._after_equal_sign:
self._after_equal_sign = False
self._closetag()
self._closetag()
# Closing tag by semicolon
elif sign == ";" and self._after_equal_sign:
self._after_equal_sign = False
self._text(before)
self._closetag()
# Comment line
elif sign == "#":
self._buffer.append(before)
after = ""
# Opening attribute specification
elif sign == "[":
if "".join(self._buffer).strip():
self._error(SYNTAX_ERROR, (self._currline, self._currline))
self._oldbefore = before
self._buffer = []
self._inside_attrib = True
self._opened_tags.append(("[", self._currline, None, None, None))
self._checkstr = _ATTRIB_SPECIALS
# Closing attribute specification
elif sign == "]":
value = "".join(self._buffer) + before
self._attrib = value.strip()
self._inside_attrib = False
self._buffer = []
self._opened_tags.pop()
self._checkstr = _GENERAL_SPECIALS
# Quoting strings
elif sign in ("'", '"'):
if self._inside_quote:
self._checkstr = self._oldcheckstr
self._inside_quote = False
self._buffer.append(before + sign)
self._opened_tags.pop()
else:
self._oldcheckstr = self._checkstr
self._checkstr = sign
self._inside_quote = True
self._buffer.append(before + sign)
self._opened_tags.append(('"', self._currline, None, None, None))
# Interrupt
elif sign == "<" and not self._after_equal_sign:
txtinc = after.startswith("<<")
hsdinc = after.startswith("<+")
if txtinc:
self._text("".join(self._buffer) + before)
self._buffer = []
self._eventhandler.add_text(self._include_txt(after[2:]))
break
if hsdinc:
self._include_hsd(after[2:])
break
self._buffer.append(before + sign)
else:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
line = after
def _text(self, text):
stripped = text.strip()
if stripped:
if self._has_child:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
self._eventhandler.add_text(stripped)
self._has_text = True
def _starttag(self, tagname, closeprev):
txt = "".join(self._buffer)
if txt:
self._text(txt)
if self._has_text:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
tagname_stripped = tagname.strip()
if self._oldbefore:
if tagname_stripped:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
else:
tagname_stripped = self._oldbefore.strip()
if len(tagname_stripped.split()) > 1:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
self._hsdattrib[common.HSD_ATTRIB_LINE] = self._currline
self._eventhandler.open_tag(tagname_stripped, self._attrib,
self._hsdattrib)
self._opened_tags.append(
(tagname_stripped, self._currline, closeprev, True, False))
self._has_child = False
self._buffer = []
self._oldbefore = ""
self._attrib = None
self._hsdattrib = {}
def _closetag(self):
if not self._opened_tags:
self._error(SYNTAX_ERROR, (0, self._currline))
self._buffer = []
tag, _, closeprev, self._has_child, self._has_text = self._opened_tags.pop()
self._eventhandler.close_tag(tag)
if closeprev:
self._closetag()
def _include_hsd(self, fname):
| fname = common.unquote(fname.strip())
parser = HsdParser(eventhandler=self._eventhandler)
parser.parse(fname) | identifier_body | |
parser.py | <=\"'#;"
_ATTRIB_SPECIALS = "]\"'"
class HsdParser:
"""Event based parser for the HSD format.
Arguments:
eventhandler: Object which should handle the HSD-events triggered
during parsing. When not specified, HsdEventPrinter() is used.
Examples:
>>> from io import StringIO
>>> dictbuilder = hsd.HsdDictBuilder()
>>> parser = hsd.HsdParser(eventhandler=dictbuilder)
>>> hsdfile = StringIO(\"\"\"
... Hamiltonian {
... Dftb {
... Scc = Yes
... Filling = Fermi {
... Temperature [Kelvin] = 100
... }
... }
... }
... \"\"\")
>>> parser.parse(hsdfile)
>>> dictbuilder.hsddict
{'Hamiltonian': {'Dftb': {'Scc': True, 'Filling': {'Fermi': |
def __init__(self, eventhandler: Optional[HsdEventHandler] = None):
"""Initializes the parser.
Args:
eventhandler: Instance of the HsdEventHandler class or its children.
"""
if eventhandler is None:
self._eventhandler = HsdEventPrinter()
else:
self._eventhandler = eventhandler
self._fname = "" # name of file being processed
self._checkstr = _GENERAL_SPECIALS # special characters to look for
self._oldcheckstr = "" # buffer fo checkstr
self._opened_tags = [] # info about opened tags
self._buffer = [] # buffering plain text between lines
self._attrib = None # attribute for current tag
self._hsdattrib = {} # hsd-options for current tag
self._currline = 0 # nr. of current line in file
self._after_equal_sign = False # last tag was opened with equal sign
self._inside_attrib = False # parser inside attrib specification
self._inside_quote = False # parser inside quotation
self._has_child = True # Whether current node has a child already
self._has_text = False # whether current node contains text already
self._oldbefore = "" # buffer for tagname
def parse(self, fobj: Union[TextIO, str]):
"""Parses the provided file-like object.
The parser will process the data and trigger the corresponding events
in the eventhandler which was passed at initialization.
Args:
fobj: File like object or name of a file containing the data.
"""
isfilename = isinstance(fobj, str)
if isfilename:
fp = open(fobj, "r")
self._fname = fobj
else:
fp = fobj
for line in fp.readlines():
self._parse(line)
self._currline += 1
if isfilename:
fp.close()
# Check for errors
if self._opened_tags:
line0 = self._opened_tags[-1][1]
else:
line0 = 0
if self._inside_quote:
self._error(UNCLOSED_QUOTATION_ERROR, (line0, self._currline))
elif self._inside_attrib:
self._error(UNCLOSED_ATTRIB_ERROR, (line0, self._currline))
elif self._opened_tags:
self._error(UNCLOSED_TAG_ERROR, (line0, line0))
elif ("".join(self._buffer)).strip():
self._error(ORPHAN_TEXT_ERROR, (line0, self._currline))
def _parse(self, line):
"""Parses a given line."""
while True:
sign, before, after = _splitbycharset(line, self._checkstr)
# End of line
if not sign:
if self._inside_quote:
self._buffer.append(before)
elif self._after_equal_sign:
self._text("".join(self._buffer) + before.strip())
self._closetag()
self._after_equal_sign = False
elif not self._inside_attrib:
self._buffer.append(before)
elif before.strip():
self._error(SYNTAX_ERROR, (self._currline, self._currline))
break
# Special character is escaped
elif before.endswith("\\") and not before.endswith("\\\\"):
self._buffer.append(before + sign)
# Equal sign
elif sign == "=":
# Ignore if followed by "{" (DFTB+ compatibility)
if after.lstrip().startswith("{"): # _oldbefore may already contain the tagname, if the # tagname was followed by an attribute -> append
self._oldbefore += before
else:
self._hsdattrib[common.HSD_ATTRIB_EQUAL] = True
self._starttag(before, False)
self._after_equal_sign = True
# Opening tag by curly brace
elif sign == "{":
#self._has_child = True
self._hsdattrib[common.HSD_ATTRIB_EQUAL] = False
self._starttag(before, self._after_equal_sign)
self._buffer = []
self._after_equal_sign = False
# Closing tag by curly brace
elif sign == "}":
self._text("".join(self._buffer) + before)
self._buffer = []
# If 'test { a = 12 }' occurs, curly brace closes two tags
if self._after_equal_sign:
self._after_equal_sign = False
self._closetag()
self._closetag()
# Closing tag by semicolon
elif sign == ";" and self._after_equal_sign:
self._after_equal_sign = False
self._text(before)
self._closetag()
# Comment line
elif sign == "#":
self._buffer.append(before)
after = ""
# Opening attribute specification
elif sign == "[":
if "".join(self._buffer).strip():
self._error(SYNTAX_ERROR, (self._currline, self._currline))
self._oldbefore = before
self._buffer = []
self._inside_attrib = True
self._opened_tags.append(("[", self._currline, None, None, None))
self._checkstr = _ATTRIB_SPECIALS
# Closing attribute specification
elif sign == "]":
value = "".join(self._buffer) + before
self._attrib = value.strip()
self._inside_attrib = False
self._buffer = []
self._opened_tags.pop()
self._checkstr = _GENERAL_SPECIALS
# Quoting strings
elif sign in ("'", '"'):
if self._inside_quote:
self._checkstr = self._oldcheckstr
self._inside_quote = False
self._buffer.append(before + sign)
self._opened_tags.pop()
else:
self._oldcheckstr = self._checkstr
self._checkstr = sign
self._inside_quote = True
self._buffer.append(before + sign)
self._opened_tags.append(('"', self._currline, None, None, None))
# Interrupt
elif sign == "<" and not self._after_equal_sign:
txtinc = after.startswith("<<")
hsdinc = after.startswith("<+")
if txtinc:
self._text("".join(self._buffer) + before)
self._buffer = []
self._eventhandler.add_text(self._include_txt(after[2:]))
break
if hsdinc:
self._include_hsd(after[2:])
break
self._buffer.append(before + sign)
else:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
line = after
def _text(self, text):
stripped = text.strip()
if stripped:
if self._has_child:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
self._eventhandler.add_text(stripped)
self._has_text = True
def _starttag(self, tagname, closeprev):
txt = "".join(self._buffer)
if txt:
self._text(txt)
if self._has_text:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
tagname_stripped = tagname.strip()
if self._oldbefore:
if tagname_stripped:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
else:
tagname_stripped = self._oldbefore.strip()
if len(tagname_stripped.split()) > 1:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
self._hsdattrib[common.HSD_ATTRIB_LINE] = self._currline
self._eventhandler.open_tag(tagname_stripped, self._attrib,
self._hsdattrib)
self._opened_tags.append(
(tagname_stripped, self._currline, closeprev, True, False))
self._has_child = False
self._buffer = []
self._oldbefore = ""
self._attrib = None
self._hsdattrib = {}
| {'Temperature': 100, 'Temperature.attrib': 'Kelvin'}}}}}
""" | random_line_split |
main.py | encoded state for the Forward Model and the encoded next state to train the forward model!
optimizing the Inverse model by the loss between actual action taken by the current policy and the predicted action by the inverse model
"""
def __init__(self, action_size, enc_state_size, hidden_size=64):
super(Inverse, self).__init__()
self.inverse = nn.Sequential(nn.Linear(enc_state_size*2, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, action_size),
nn.Softmax(dim=1))
def forward(self, state1,state2):
x = torch.cat( (state1, state2), dim=1)
return self.inverse(x)
class Forward(nn.Module):
def __init__(self, enc_state_size, OHE_size=12, hidden_size=64):
super(Forward, self).__init__()
self.OHE_size = OHE_size
self.forwardM = nn.Sequential(nn.Linear(enc_state_size+self.OHE_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size,enc_state_size))
def forward(self,state,action):
"""
Gets as inputs the aciton taken from the policy and the encoded state by the encoder in the inverse model.
The froward model trys to predict the encoded next state.
Returns the predicted encoded next state.
Gets optimized by the MSE between the actual encoded next state and the predicted version of the forward model!
"""
action_ = torch.zeros(action.shape[0],self.OHE_size) # 2024,OHE
indices = torch.stack( (torch.arange(action.shape[0]), action.squeeze().long()), dim=0)
indices = indices.tolist()
action_[indices] = 1.
x = torch.cat( (state,action_) ,dim=1)
return self.forwardM(x)
def ICM(state1, action, state2, forward_scale=1., inverse_scale=1e4):
"""
"""
state1_hat = encoder(state1)
state2_hat = encoder(state2)
state2_hat_pred = forwardM(state1_hat.detach(), action.detach())
forward_pred_err = forward_scale * forward_loss(state2_hat_pred, state2_hat.detach()).sum(dim=1).unsqueeze(dim=1)
pred_action = inverse(state1_hat, state2_hat)
inverse_pred_err = inverse_scale * inverse_loss(pred_action, action.detach().flatten().long()).unsqueeze(dim=1)
return forward_pred_err, inverse_pred_err
def test_net( count = 10):
rewards = 0.0
steps = 0
entropys = 0.0
for _ in range(count):
obs = env.reset()
while True:
obs_v = torch.from_numpy(obs).unsqueeze(0).float()
action, _, dist = actor(obs_v.to(device))
entropy = dist.entropy().detach().cpu().numpy()
obs, reward, done, info = env.step(action[0].cpu().numpy())
rewards += reward
entropys += entropy.mean()
steps += 1
if done:
break
return rewards/count, entropys/count, steps/count
def compute_gae(next_value, rewards, masks, values, gamma=GAMMA, lambda_=LAMBDA):
"""
lambda => 1: high variance, low bias
lambda => 0: low variance, high bias
"""
values.append(next_value)
gae = 0
disc_returns = []
advantage = []
for step in reversed(range(len(rewards))):
# d = r_t +gamma*V(s_t+1) - V(s)
delta = rewards[step] + gamma * values[step + 1] * masks[step] - values[step]
# sum(lambda*gamma)^t* delta_t+1
gae = delta + gamma * lambda_ * masks[step] * gae
disc_returns.insert(0, gae + values[step]) # adding values since we want the returns and not the advantage yet! A(a,s) = Q"returns" - V(s)
advantage.insert(0, gae)
return torch.FloatTensor(disc_returns).unsqueeze(1), torch.FloatTensor(advantage).unsqueeze(1)
def ppo_iter(mini_batch_size, states, actions, log_probs, advantage, discounted_rewards, curiosity_loss):
batch_size = len(states)
#print(batch_size)
for i in range(batch_size // mini_batch_size):
rand_ids = np.random.randint(0, batch_size, mini_batch_size)
yield torch.cat(states)[rand_ids], torch.cat(actions)[rand_ids], torch.cat(log_probs)[rand_ids], advantage[rand_ids], discounted_rewards[rand_ids], curiosity_loss[rand_ids]
def ppo_update(ppo_epochs, mini_batch_size, states, actions, log_probs, advantage, discounted_rewards, curiosity_loss, eps_clip=0.2):
"""
"""
a_loss_batch = []
c_loss_batch = []
icm_loss_batch = []
for _ in range(ppo_epochs):
for states_i, old_actions, old_logprobs, advantage_i, discounted_reward_i, cur_loss in ppo_iter(mini_batch_size, states, actions, log_probs, advantage, discounted_rewards, curiosity_loss):
optimizer.zero_grad()
#c_optimizer.zero_grad()
#tran critic
new_value = critic(states_i.to(device))
c_loss = .5 * F.mse_loss(new_value, discounted_reward_i)
#clip_grad_norm_(critic.parameters(),CLIP_GRAD)
#c_loss.backward()
#c_optimizer.step()
c_loss_batch.append(c_loss.detach().numpy())
#train actor
#a_optimizer.zero_grad()
_, _, dist = actor(states_i.to(device))
new_logprobs = dist.log_prob(old_actions)
entropy = dist.entropy().mean()
ratio = torch.exp(new_logprobs - old_logprobs.detach())
surr = ratio * advantage_i
clip = torch.clamp(ratio, 1.0 - eps_clip, 1.0 + eps_clip)
a_loss = - (torch.min(surr, clip*advantage_i).mean()) + ENTROPY_BONUS * entropy
#clip_grad_norm_(actor.parameters(),CLIP_GRAD)
#a_loss.backward()
#a_optimizer.step()
a_loss_batch.append(a_loss.detach().numpy())
#train icm
#icm_optimizer.zero_grad()
cur_loss = cur_loss.mean()
#cur_loss.backward(retain_graph=True)
#icm_optimizer.step()
icm_loss_batch.append(cur_loss.detach().numpy())
#when calculated combined loss:
overall_loss = SCALAR_BETA * (c_loss + a_loss) + cur_loss
overall_loss.backward(retain_graph=True)
optimizer.step()
return np.array(c_loss_batch).mean(), np.array(a_loss_batch).mean(), np.array(icm_loss_batch)
torch.manual_seed(42)
torch.cuda.manual_seed(42)
np.random.seed(42)
env.seed(42)
input_shape = env.observation_space.shape[0]
output_shape = env.action_space.n
actor = Actor(input_shape, output_shape).to(device)
critic = Critic(input_shape).to(device)
encoder = Encoder(state_size=input_shape, enc_state_size=2)
inverse = Inverse(action_size=output_shape, enc_state_size=2)
forwardM = Forward(enc_state_size=2, OHE_size=2)
forward_loss = nn.MSELoss(reduction='none')
inverse_loss = nn.CrossEntropyLoss(reduction='none')
all_parameters = list(actor.parameters())+list(critic.parameters())+list(encoder.parameters())+list(inverse.parameters())+list(forwardM.parameters())
optimizer = optim.RMSprop(params=all_parameters, lr=A_LR)
# a_optimizer = optim.RMSprop(params=actor.parameters(), lr=A_LR)
# c_optimizer = optim.RMSprop(params=critic.parameters(), lr=C_LR)
# icm_params = list(encoder.parameters())+list(inverse.parameters())+list(forwardM.parameters())
# icm_optimizer = optim.Adam(params=icm_params, lr = 1e-3)
max_episodes = 550
c_loss_list = []
a_loss_list = []
icm_loss_list = []
entropy_list = []
intrinsic_rewards = []
average_100 = []
plot_rewards = []
max_steps = 2024
for ep in range(max_episodes+1):
state = env.reset()
done = False
state_batch = []
next_state_batch = []
value_batch = []
action_batch = []
logprob_batch = []
rewards_batch = []
masks = []
for step in range(max_steps):
state = torch.from_numpy(state).unsqueeze(0).float()
action, logprob, _ = actor(state.to(device))
value = critic(state.to(device))
next_state, reward, done, _ = env.step(action[0].cpu().numpy())
state_batch.append(state)
next_state_batch.append(torch.from_numpy(next_state).unsqueeze(0).float())
value_batch.append(value.item())
logprob_batch.append(logprob)
action_batch.append(action)
rewards_batch.append(reward)
masks.append(1 - done)
state = next_state
if done:
state = env.reset()
|
# Intrinsic Curiosity Calculation
state1_batch = torch.cat(state_batch) | random_line_split | |
main.py | .net = nn.Sequential(nn.Linear(input_shape, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE,HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, output_shape),
nn.Softmax(dim=1)
)
def forward(self, x):
probs = self.net(x)
dist = Categorical(probs)
actions = dist.sample()
logprobs = dist.log_prob(actions)
return actions, logprobs, dist
class Encoder(nn.Module):
def __init__(self, state_size, enc_state_size=12, hidden_size=64):
super(Encoder, self).__init__()
self.encoder = nn.Sequential(nn.Linear(state_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, enc_state_size))
def forward(self,x):
return self.encoder(x)
class Inverse(nn.Module):
"""
1. (first submodel) encodes the state and next state into feature space.
2. (second submodel) the inverse approximates the action taken by the given state and next state in feature size
returns the predicted action and the encoded state for the Forward Model and the encoded next state to train the forward model!
optimizing the Inverse model by the loss between actual action taken by the current policy and the predicted action by the inverse model
"""
def __init__(self, action_size, enc_state_size, hidden_size=64):
super(Inverse, self).__init__()
self.inverse = nn.Sequential(nn.Linear(enc_state_size*2, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, action_size),
nn.Softmax(dim=1))
def forward(self, state1,state2):
x = torch.cat( (state1, state2), dim=1)
return self.inverse(x)
class Forward(nn.Module):
def __init__(self, enc_state_size, OHE_size=12, hidden_size=64):
super(Forward, self).__init__()
self.OHE_size = OHE_size
self.forwardM = nn.Sequential(nn.Linear(enc_state_size+self.OHE_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size,enc_state_size))
def forward(self,state,action):
"""
Gets as inputs the aciton taken from the policy and the encoded state by the encoder in the inverse model.
The froward model trys to predict the encoded next state.
Returns the predicted encoded next state.
Gets optimized by the MSE between the actual encoded next state and the predicted version of the forward model!
"""
action_ = torch.zeros(action.shape[0],self.OHE_size) # 2024,OHE
indices = torch.stack( (torch.arange(action.shape[0]), action.squeeze().long()), dim=0)
indices = indices.tolist()
action_[indices] = 1.
x = torch.cat( (state,action_) ,dim=1)
return self.forwardM(x)
def | (state1, action, state2, forward_scale=1., inverse_scale=1e4):
"""
"""
state1_hat = encoder(state1)
state2_hat = encoder(state2)
state2_hat_pred = forwardM(state1_hat.detach(), action.detach())
forward_pred_err = forward_scale * forward_loss(state2_hat_pred, state2_hat.detach()).sum(dim=1).unsqueeze(dim=1)
pred_action = inverse(state1_hat, state2_hat)
inverse_pred_err = inverse_scale * inverse_loss(pred_action, action.detach().flatten().long()).unsqueeze(dim=1)
return forward_pred_err, inverse_pred_err
def test_net( count = 10):
rewards = 0.0
steps = 0
entropys = 0.0
for _ in range(count):
obs = env.reset()
while True:
obs_v = torch.from_numpy(obs).unsqueeze(0).float()
action, _, dist = actor(obs_v.to(device))
entropy = dist.entropy().detach().cpu().numpy()
obs, reward, done, info = env.step(action[0].cpu().numpy())
rewards += reward
entropys += entropy.mean()
steps += 1
if done:
break
return rewards/count, entropys/count, steps/count
def compute_gae(next_value, rewards, masks, values, gamma=GAMMA, lambda_=LAMBDA):
"""
lambda => 1: high variance, low bias
lambda => 0: low variance, high bias
"""
values.append(next_value)
gae = 0
disc_returns = []
advantage = []
for step in reversed(range(len(rewards))):
# d = r_t +gamma*V(s_t+1) - V(s)
delta = rewards[step] + gamma * values[step + 1] * masks[step] - values[step]
# sum(lambda*gamma)^t* delta_t+1
gae = delta + gamma * lambda_ * masks[step] * gae
disc_returns.insert(0, gae + values[step]) # adding values since we want the returns and not the advantage yet! A(a,s) = Q"returns" - V(s)
advantage.insert(0, gae)
return torch.FloatTensor(disc_returns).unsqueeze(1), torch.FloatTensor(advantage).unsqueeze(1)
def ppo_iter(mini_batch_size, states, actions, log_probs, advantage, discounted_rewards, curiosity_loss):
batch_size = len(states)
#print(batch_size)
for i in range(batch_size // mini_batch_size):
rand_ids = np.random.randint(0, batch_size, mini_batch_size)
yield torch.cat(states)[rand_ids], torch.cat(actions)[rand_ids], torch.cat(log_probs)[rand_ids], advantage[rand_ids], discounted_rewards[rand_ids], curiosity_loss[rand_ids]
def ppo_update(ppo_epochs, mini_batch_size, states, actions, log_probs, advantage, discounted_rewards, curiosity_loss, eps_clip=0.2):
"""
"""
a_loss_batch = []
c_loss_batch = []
icm_loss_batch = []
for _ in range(ppo_epochs):
for states_i, old_actions, old_logprobs, advantage_i, discounted_reward_i, cur_loss in ppo_iter(mini_batch_size, states, actions, log_probs, advantage, discounted_rewards, curiosity_loss):
optimizer.zero_grad()
#c_optimizer.zero_grad()
#tran critic
new_value = critic(states_i.to(device))
c_loss = .5 * F.mse_loss(new_value, discounted_reward_i)
#clip_grad_norm_(critic.parameters(),CLIP_GRAD)
#c_loss.backward()
#c_optimizer.step()
c_loss_batch.append(c_loss.detach().numpy())
#train actor
#a_optimizer.zero_grad()
_, _, dist = actor(states_i.to(device))
new_logprobs = dist.log_prob(old_actions)
entropy = dist.entropy().mean()
ratio = torch.exp(new_logprobs - old_logprobs.detach())
surr = ratio * advantage_i
clip = torch.clamp(ratio, 1.0 - eps_clip, 1.0 + eps_clip)
a_loss = - (torch.min(surr, clip*advantage_i).mean()) + ENTROPY_BONUS * entropy
#clip_grad_norm_(actor.parameters(),CLIP_GRAD)
#a_loss.backward()
#a_optimizer.step()
a_loss_batch.append(a_loss.detach().numpy())
#train icm
#icm_optimizer.zero_grad()
cur_loss = cur_loss.mean()
#cur_loss.backward(retain_graph=True)
#icm_optimizer.step()
icm_loss_batch.append(cur_loss.detach().numpy())
#when calculated combined loss:
overall_loss = SCALAR_BETA * (c_loss + a_loss) + cur_loss
overall_loss.backward(retain_graph=True)
optimizer.step()
return np.array(c_loss_batch).mean(), np.array(a_loss_batch).mean(), np.array(icm_loss_batch)
torch.manual_seed(42)
torch.cuda.manual_seed(42)
np.random.seed(42)
env.seed(42)
input_shape = env.observation_space.shape[0]
output_shape = env.action_space.n
actor = Actor(input_shape, output_shape).to(device)
critic = Critic(input_shape).to(device)
encoder = Encoder(state_size=input_shape, enc_state_size=2)
inverse = Inverse(action_size=output_shape, enc_state_size=2)
forwardM = Forward(enc_state_size=2, OHE_size=2)
forward_loss = nn.MSELoss(reduction='none')
inverse_loss = nn.CrossEntropyLoss(reduction='none')
all_parameters = list(actor.parameters())+list(critic.parameters())+list(encoder.parameters())+list(inverse.parameters())+list(forwardM.parameters())
optimizer = optim.RMSprop(params=all_parameters, lr=A_LR)
# a_optimizer = optim.RMSprop(params=actor.parameters(), lr=A_LR)
# c_optimizer = optim.RMSprop(params=critic.parameters(), lr=C_LR)
# icm_params = list(encoder.parameters())+list(inverse.parameters())+list(forwardM.parameters())
# icm_optimizer = optim.Adam(params=icm_params, lr = 1e-3)
max_episodes = 550
c_loss_list = []
a | ICM | identifier_name |
main.py | .net = nn.Sequential(nn.Linear(input_shape, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE,HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, output_shape),
nn.Softmax(dim=1)
)
def forward(self, x):
probs = self.net(x)
dist = Categorical(probs)
actions = dist.sample()
logprobs = dist.log_prob(actions)
return actions, logprobs, dist
class Encoder(nn.Module):
def __init__(self, state_size, enc_state_size=12, hidden_size=64):
super(Encoder, self).__init__()
self.encoder = nn.Sequential(nn.Linear(state_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, enc_state_size))
def forward(self,x):
return self.encoder(x)
class Inverse(nn.Module):
"""
1. (first submodel) encodes the state and next state into feature space.
2. (second submodel) the inverse approximates the action taken by the given state and next state in feature size
returns the predicted action and the encoded state for the Forward Model and the encoded next state to train the forward model!
optimizing the Inverse model by the loss between actual action taken by the current policy and the predicted action by the inverse model
"""
def __init__(self, action_size, enc_state_size, hidden_size=64):
super(Inverse, self).__init__()
self.inverse = nn.Sequential(nn.Linear(enc_state_size*2, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, action_size),
nn.Softmax(dim=1))
def forward(self, state1,state2):
x = torch.cat( (state1, state2), dim=1)
return self.inverse(x)
class Forward(nn.Module):
def __init__(self, enc_state_size, OHE_size=12, hidden_size=64):
super(Forward, self).__init__()
self.OHE_size = OHE_size
self.forwardM = nn.Sequential(nn.Linear(enc_state_size+self.OHE_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size,enc_state_size))
def forward(self,state,action):
"""
Gets as inputs the aciton taken from the policy and the encoded state by the encoder in the inverse model.
The froward model trys to predict the encoded next state.
Returns the predicted encoded next state.
Gets optimized by the MSE between the actual encoded next state and the predicted version of the forward model!
"""
action_ = torch.zeros(action.shape[0],self.OHE_size) # 2024,OHE
indices = torch.stack( (torch.arange(action.shape[0]), action.squeeze().long()), dim=0)
indices = indices.tolist()
action_[indices] = 1.
x = torch.cat( (state,action_) ,dim=1)
return self.forwardM(x)
def ICM(state1, action, state2, forward_scale=1., inverse_scale=1e4):
"""
"""
state1_hat = encoder(state1)
state2_hat = encoder(state2)
state2_hat_pred = forwardM(state1_hat.detach(), action.detach())
forward_pred_err = forward_scale * forward_loss(state2_hat_pred, state2_hat.detach()).sum(dim=1).unsqueeze(dim=1)
pred_action = inverse(state1_hat, state2_hat)
inverse_pred_err = inverse_scale * inverse_loss(pred_action, action.detach().flatten().long()).unsqueeze(dim=1)
return forward_pred_err, inverse_pred_err
def test_net( count = 10):
rewards = 0.0
steps = 0
entropys = 0.0
for _ in range(count):
obs = env.reset()
while True:
obs_v = torch.from_numpy(obs).unsqueeze(0).float()
action, _, dist = actor(obs_v.to(device))
entropy = dist.entropy().detach().cpu().numpy()
obs, reward, done, info = env.step(action[0].cpu().numpy())
rewards += reward
entropys += entropy.mean()
steps += 1
if done:
break
return rewards/count, entropys/count, steps/count
def compute_gae(next_value, rewards, masks, values, gamma=GAMMA, lambda_=LAMBDA):
"""
lambda => 1: high variance, low bias
lambda => 0: low variance, high bias
"""
values.append(next_value)
gae = 0
disc_returns = []
advantage = []
for step in reversed(range(len(rewards))):
# d = r_t +gamma*V(s_t+1) - V(s)
delta = rewards[step] + gamma * values[step + 1] * masks[step] - values[step]
# sum(lambda*gamma)^t* delta_t+1
gae = delta + gamma * lambda_ * masks[step] * gae
disc_returns.insert(0, gae + values[step]) # adding values since we want the returns and not the advantage yet! A(a,s) = Q"returns" - V(s)
advantage.insert(0, gae)
return torch.FloatTensor(disc_returns).unsqueeze(1), torch.FloatTensor(advantage).unsqueeze(1)
def ppo_iter(mini_batch_size, states, actions, log_probs, advantage, discounted_rewards, curiosity_loss):
batch_size = len(states)
#print(batch_size)
for i in range(batch_size // mini_batch_size):
rand_ids = np.random.randint(0, batch_size, mini_batch_size)
yield torch.cat(states)[rand_ids], torch.cat(actions)[rand_ids], torch.cat(log_probs)[rand_ids], advantage[rand_ids], discounted_rewards[rand_ids], curiosity_loss[rand_ids]
def ppo_update(ppo_epochs, mini_batch_size, states, actions, log_probs, advantage, discounted_rewards, curiosity_loss, eps_clip=0.2):
"""
"""
a_loss_batch = []
c_loss_batch = []
icm_loss_batch = []
for _ in range(ppo_epochs):
| ratio = torch.exp(new_logprobs - old_logprobs.detach())
surr = ratio * advantage_i
clip = torch.clamp(ratio, 1.0 - eps_clip, 1.0 + eps_clip)
a_loss = - (torch.min(surr, clip*advantage_i).mean()) + ENTROPY_BONUS * entropy
#clip_grad_norm_(actor.parameters(),CLIP_GRAD)
#a_loss.backward()
#a_optimizer.step()
a_loss_batch.append(a_loss.detach().numpy())
#train icm
#icm_optimizer.zero_grad()
cur_loss = cur_loss.mean()
#cur_loss.backward(retain_graph=True)
#icm_optimizer.step()
icm_loss_batch.append(cur_loss.detach().numpy())
#when calculated combined loss:
overall_loss = SCALAR_BETA * (c_loss + a_loss) + cur_loss
overall_loss.backward(retain_graph=True)
optimizer.step()
return np.array(c_loss_batch).mean(), np.array(a_loss_batch).mean(), np.array(icm_loss_batch)
torch.manual_seed(42)
torch.cuda.manual_seed(42)
np.random.seed(42)
env.seed(42)
input_shape = env.observation_space.shape[0]
output_shape = env.action_space.n
actor = Actor(input_shape, output_shape).to(device)
critic = Critic(input_shape).to(device)
encoder = Encoder(state_size=input_shape, enc_state_size=2)
inverse = Inverse(action_size=output_shape, enc_state_size=2)
forwardM = Forward(enc_state_size=2, OHE_size=2)
forward_loss = nn.MSELoss(reduction='none')
inverse_loss = nn.CrossEntropyLoss(reduction='none')
all_parameters = list(actor.parameters())+list(critic.parameters())+list(encoder.parameters())+list(inverse.parameters())+list(forwardM.parameters())
optimizer = optim.RMSprop(params=all_parameters, lr=A_LR)
# a_optimizer = optim.RMSprop(params=actor.parameters(), lr=A_LR)
# c_optimizer = optim.RMSprop(params=critic.parameters(), lr=C_LR)
# icm_params = list(encoder.parameters())+list(inverse.parameters())+list(forwardM.parameters())
# icm_optimizer = optim.Adam(params=icm_params, lr = 1e-3)
max_episodes = 550
c_loss_list = []
a_loss | for states_i, old_actions, old_logprobs, advantage_i, discounted_reward_i, cur_loss in ppo_iter(mini_batch_size, states, actions, log_probs, advantage, discounted_rewards, curiosity_loss):
optimizer.zero_grad()
#c_optimizer.zero_grad()
#tran critic
new_value = critic(states_i.to(device))
c_loss = .5 * F.mse_loss(new_value, discounted_reward_i)
#clip_grad_norm_(critic.parameters(),CLIP_GRAD)
#c_loss.backward()
#c_optimizer.step()
c_loss_batch.append(c_loss.detach().numpy())
#train actor
#a_optimizer.zero_grad()
_, _, dist = actor(states_i.to(device))
new_logprobs = dist.log_prob(old_actions)
entropy = dist.entropy().mean()
| conditional_block |
main.py | .net = nn.Sequential(nn.Linear(input_shape, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE,HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, output_shape),
nn.Softmax(dim=1)
)
def forward(self, x):
probs = self.net(x)
dist = Categorical(probs)
actions = dist.sample()
logprobs = dist.log_prob(actions)
return actions, logprobs, dist
class Encoder(nn.Module):
def __init__(self, state_size, enc_state_size=12, hidden_size=64):
super(Encoder, self).__init__()
self.encoder = nn.Sequential(nn.Linear(state_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, enc_state_size))
def forward(self,x):
return self.encoder(x)
class Inverse(nn.Module):
"""
1. (first submodel) encodes the state and next state into feature space.
2. (second submodel) the inverse approximates the action taken by the given state and next state in feature size
returns the predicted action and the encoded state for the Forward Model and the encoded next state to train the forward model!
optimizing the Inverse model by the loss between actual action taken by the current policy and the predicted action by the inverse model
"""
def __init__(self, action_size, enc_state_size, hidden_size=64):
super(Inverse, self).__init__()
self.inverse = nn.Sequential(nn.Linear(enc_state_size*2, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, action_size),
nn.Softmax(dim=1))
def forward(self, state1,state2):
x = torch.cat( (state1, state2), dim=1)
return self.inverse(x)
class Forward(nn.Module):
def __init__(self, enc_state_size, OHE_size=12, hidden_size=64):
super(Forward, self).__init__()
self.OHE_size = OHE_size
self.forwardM = nn.Sequential(nn.Linear(enc_state_size+self.OHE_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size,enc_state_size))
def forward(self,state,action):
|
def ICM(state1, action, state2, forward_scale=1., inverse_scale=1e4):
"""
"""
state1_hat = encoder(state1)
state2_hat = encoder(state2)
state2_hat_pred = forwardM(state1_hat.detach(), action.detach())
forward_pred_err = forward_scale * forward_loss(state2_hat_pred, state2_hat.detach()).sum(dim=1).unsqueeze(dim=1)
pred_action = inverse(state1_hat, state2_hat)
inverse_pred_err = inverse_scale * inverse_loss(pred_action, action.detach().flatten().long()).unsqueeze(dim=1)
return forward_pred_err, inverse_pred_err
def test_net( count = 10):
rewards = 0.0
steps = 0
entropys = 0.0
for _ in range(count):
obs = env.reset()
while True:
obs_v = torch.from_numpy(obs).unsqueeze(0).float()
action, _, dist = actor(obs_v.to(device))
entropy = dist.entropy().detach().cpu().numpy()
obs, reward, done, info = env.step(action[0].cpu().numpy())
rewards += reward
entropys += entropy.mean()
steps += 1
if done:
break
return rewards/count, entropys/count, steps/count
def compute_gae(next_value, rewards, masks, values, gamma=GAMMA, lambda_=LAMBDA):
"""
lambda => 1: high variance, low bias
lambda => 0: low variance, high bias
"""
values.append(next_value)
gae = 0
disc_returns = []
advantage = []
for step in reversed(range(len(rewards))):
# d = r_t +gamma*V(s_t+1) - V(s)
delta = rewards[step] + gamma * values[step + 1] * masks[step] - values[step]
# sum(lambda*gamma)^t* delta_t+1
gae = delta + gamma * lambda_ * masks[step] * gae
disc_returns.insert(0, gae + values[step]) # adding values since we want the returns and not the advantage yet! A(a,s) = Q"returns" - V(s)
advantage.insert(0, gae)
return torch.FloatTensor(disc_returns).unsqueeze(1), torch.FloatTensor(advantage).unsqueeze(1)
def ppo_iter(mini_batch_size, states, actions, log_probs, advantage, discounted_rewards, curiosity_loss):
batch_size = len(states)
#print(batch_size)
for i in range(batch_size // mini_batch_size):
rand_ids = np.random.randint(0, batch_size, mini_batch_size)
yield torch.cat(states)[rand_ids], torch.cat(actions)[rand_ids], torch.cat(log_probs)[rand_ids], advantage[rand_ids], discounted_rewards[rand_ids], curiosity_loss[rand_ids]
def ppo_update(ppo_epochs, mini_batch_size, states, actions, log_probs, advantage, discounted_rewards, curiosity_loss, eps_clip=0.2):
"""
"""
a_loss_batch = []
c_loss_batch = []
icm_loss_batch = []
for _ in range(ppo_epochs):
for states_i, old_actions, old_logprobs, advantage_i, discounted_reward_i, cur_loss in ppo_iter(mini_batch_size, states, actions, log_probs, advantage, discounted_rewards, curiosity_loss):
optimizer.zero_grad()
#c_optimizer.zero_grad()
#tran critic
new_value = critic(states_i.to(device))
c_loss = .5 * F.mse_loss(new_value, discounted_reward_i)
#clip_grad_norm_(critic.parameters(),CLIP_GRAD)
#c_loss.backward()
#c_optimizer.step()
c_loss_batch.append(c_loss.detach().numpy())
#train actor
#a_optimizer.zero_grad()
_, _, dist = actor(states_i.to(device))
new_logprobs = dist.log_prob(old_actions)
entropy = dist.entropy().mean()
ratio = torch.exp(new_logprobs - old_logprobs.detach())
surr = ratio * advantage_i
clip = torch.clamp(ratio, 1.0 - eps_clip, 1.0 + eps_clip)
a_loss = - (torch.min(surr, clip*advantage_i).mean()) + ENTROPY_BONUS * entropy
#clip_grad_norm_(actor.parameters(),CLIP_GRAD)
#a_loss.backward()
#a_optimizer.step()
a_loss_batch.append(a_loss.detach().numpy())
#train icm
#icm_optimizer.zero_grad()
cur_loss = cur_loss.mean()
#cur_loss.backward(retain_graph=True)
#icm_optimizer.step()
icm_loss_batch.append(cur_loss.detach().numpy())
#when calculated combined loss:
overall_loss = SCALAR_BETA * (c_loss + a_loss) + cur_loss
overall_loss.backward(retain_graph=True)
optimizer.step()
return np.array(c_loss_batch).mean(), np.array(a_loss_batch).mean(), np.array(icm_loss_batch)
torch.manual_seed(42)
torch.cuda.manual_seed(42)
np.random.seed(42)
env.seed(42)
input_shape = env.observation_space.shape[0]
output_shape = env.action_space.n
actor = Actor(input_shape, output_shape).to(device)
critic = Critic(input_shape).to(device)
encoder = Encoder(state_size=input_shape, enc_state_size=2)
inverse = Inverse(action_size=output_shape, enc_state_size=2)
forwardM = Forward(enc_state_size=2, OHE_size=2)
forward_loss = nn.MSELoss(reduction='none')
inverse_loss = nn.CrossEntropyLoss(reduction='none')
all_parameters = list(actor.parameters())+list(critic.parameters())+list(encoder.parameters())+list(inverse.parameters())+list(forwardM.parameters())
optimizer = optim.RMSprop(params=all_parameters, lr=A_LR)
# a_optimizer = optim.RMSprop(params=actor.parameters(), lr=A_LR)
# c_optimizer = optim.RMSprop(params=critic.parameters(), lr=C_LR)
# icm_params = list(encoder.parameters())+list(inverse.parameters())+list(forwardM.parameters())
# icm_optimizer = optim.Adam(params=icm_params, lr = 1e-3)
max_episodes = 550
c_loss_list = []
a | """
Gets as inputs the aciton taken from the policy and the encoded state by the encoder in the inverse model.
The froward model trys to predict the encoded next state.
Returns the predicted encoded next state.
Gets optimized by the MSE between the actual encoded next state and the predicted version of the forward model!
"""
action_ = torch.zeros(action.shape[0],self.OHE_size) # 2024,OHE
indices = torch.stack( (torch.arange(action.shape[0]), action.squeeze().long()), dim=0)
indices = indices.tolist()
action_[indices] = 1.
x = torch.cat( (state,action_) ,dim=1)
return self.forwardM(x) | identifier_body |
TimeSlotGroup.js | izer'
import { elementType, dateFormat } from './utils/propTypes'
import EventSlot from './EventSlot';
import { DropTarget } from 'react-dnd';
import MasterListSlot from './MasterListSlot';
import moment from 'moment';
import Modal from '../../Modal/index';
import Button from "../../Button";
const squareTarget = {
drop(props) {
props.transferTraining(props.value); // drag and drop
console.log('DROP props :', props);
//moveKnight(props.x, props.y);
},
// hover(props, monitor, component) {
// // This is fired very often and lets you perform side effects
// // in response to the hover. You can't handle enter and leave
// // here—if you need them, put monitor.isOver() into collect() so you
// // can just use componentDidUpdate() to handle enter/leave.
// }
};
function collect(connect, monitor){
return{
connectDropTarget: connect.dropTarget(),
hovered: monitor.isOver(),
item: monitor.getItem(),
}
}
class TimeSlotGroup extends Component {
static propTypes = {
dayWrapperComponent: elementType,
timeslots: PropTypes.number.isRequired,
step: PropTypes.number.isRequired,
value: PropTypes.instanceOf(Date).isRequired,
showLabels: PropTypes.bool,
isNow: PropTypes.bool,
slotPropGetter: PropTypes.func,
timeGutterFormat: dateFormat,
culture: PropTypes.string,
resource: PropTypes.string,
}
static defaultProps = {
intervals: [],
timeslots: 2,
step: 30,
isNow: false,
showLabels: false,
freeTrainers: null,
}
constructor(props) {
super(props);
this.state = {
modalWasTransfer: false,
modalTooLateTransfer: false,
}
};
showWasTransferModal = () => {
this.setState({modalWasTransfer: true});
}
showTooLateTransferModal = () => {
this.setState({modalTooLateTransfer: true});
}
renderSlice(slotNumber, content, value) {
const {
dayWrapperComponent,
showLabels,
isNow,
culture,
resource,
slotPropGetter,
showTransferEvent, //my
} = this.props
return (
<TimeSlot
key={slotNumber}
slotNumber={slotNumber}
slotPropGetter={slotPropGetter}
dayWrapperComponent={dayWrapperComponent}
showLabel={showLabels}
content={content}
culture={culture}
isNow={isNow}
resource={resource}
value={value}
showTransferEvent={showTransferEvent}
/>
)
}
renderSlices() {
const ret = []
const sliceLength = this.props.step
let sliceValue = this.props.value;
for (let i = 0; i < this.props.timeslots; i++) {
const content = localizer.format(
sliceValue,
'HH:mm',
this.props.culture
);
ret.push(this.renderSlice(i, content, sliceValue))
sliceValue = date.add(sliceValue, sliceLength, 'minutes')
}
return ret
}
renderEvent = () => {
let {
events,
showTransferEvent,
freeTrainers,
setChoosenTrainer,
showLabels,
handleDrop,
onCancelTraining,
trainerTraining,
mode,
onGotoPage,
isPushBtnTransfer,
} = this.props;
const valueTime = this.props.value.getTime()
for( let i = 0; i < events.length; i++){
if(events[i].start.getTime() === valueTime && showLabels) {
return (
<EventSlot
key={this.props.value.getTime()}
value={this.props.value.getTime()}
event={events[i]}
showTransferEvent={showTransferEvent}
setChoosenTrainer={this.props.setChoosenTrainer}
freeTrainers={freeTrainers}Б
idEvent={events[i].start.getTime()}
handleDrop={handleDrop}
setAbonement_Training = {this.props.setAbonement_Training}
onCancelTraining = {onCancelTraining}
mode = {mode}
onGotoPage = {onGotoPage}
isPushBtnTransfer = {isPushBtnTransfer}
deleteTraining = {this.props.deleteTraining}
deleteEventApiPatient={this.props.deleteEventApiPatient}
clickOnEvent={this.props.clickOnEvent}
selectIdEvent={this.props.selectIdEvent}
showTooLateTransferModal={this.showTooLateTransferModal}
showWasTransferModal={this.showWasTransferModal}
/>)
}
}
if(freeTrainers && freeTrainers.idEvent === this.props.value.getTime() && !showLabels){ // рендер выпадающего списка freeTrainer
return <EventSlot
key={this.props.value.getTime()}
value={this.props.value.getTime()}
showTransferEvent={showTransferEvent}
freeTrainers={freeTrainers}
setChoosenTrainer={this.props.setChoosenTrainer}
idEvent={freeTrainers.idEvent}
onGotoPage = {onGotoPage}
deleteEventApiPatient = {this.props.deleteEventApiPatient}
/>
}
return null;
}
renderMasterList = () => {
const {masterList, value, showMasterList} = this.props;
let freetrainers = [];
let busytrainers = [];
for(let elem in masterList){
if(elem === 'freetrainers') {
freetrainers = masterList[elem]
}
if(elem === 'busytrainers'){
busytrainers = masterList[elem]
}
}
if(freetrainers.length || busytrainers.length)
return (
<MasterListSlot
key={value.getTime()}
freetrainers = {freetrainers}
busytrainers = {busytrainers}
value = {value.getTime()}
showMasterList = {showMasterList}
/>
)
}
showModalTransferEvent = (idValue) => {
this.props.showModalTransferEvent(idValue);
}
render() {
//drag and drop
|
const isViewTrainer = (freeTrainers && freeTrainers.idEvent === this.props.value.getTime()) ? true : false;//не OK если таместь freeTrainers
const currentEvent = this.renderEvent();
let cellClass = cn('rbc-timeslot-group', flag && !isViewTrainer && !currentEvent ? 'rbc-timeslot-group-OK' : 'rbc-timeslot-group-NOT');
const modalTransferEvent = flag && !isViewTrainer && !currentEvent ? this.showModalTransferEvent : () => {}; // перенос тренировки
if(isAdmin) {
return (
<div className={cellClass} style={{backgroundColor}} onClick={(e) => modalTransferEvent(value.getTime())}>
{this.renderSlices()}
{this.renderMasterList()}
</div>
)
}
if(flag && !isViewTrainer && !currentEvent){
return connectDropTarget(
<div className={cellClass} style={{backgroundColor}} onClick={(e) => modalTransferEvent(value.getTime())}>
{this.renderSlices()}
{currentEvent}
</div>
)
}
return (
<div className={cellClass} style={{backgroundColor}} onClick={(e) => modalTransferEvent(value.getTime())}>
{this.renderSlices()}
{currentEvent}
<Modal
title='Сообщение'
visible={this.state.modalTooLateTransfer}
onCancel={() => this.setState({modalTooLateTransfer : false})}
width={360}
className="schedule-message-modal-wrapper"
>
<div className="schedule-message-modal-text">
Переносить тренировку можно только за 24 часа до тренировки
</div>
<div className="schedule-message-modal">
<div className="schedule-message-btn">
<Button btnText='Ок'
onClick= {() => {
this.setState({modalTooLateTransfer: false});
}}
type='yellow'/>
</div>
</div>
</Modal>
<Modal
title='Сообщение'
visible={this.state.modalWasTransfer}
onCancel={() => this.setState({modalWasTransfer : false})}
width={360}
className="schedule-message-modal-wrapper"
>
<div className="schedule-message-modal-text">
| const { connectDropTarget, hovered, item} = this.props;
const backgroundColor= hovered ? '#e8f8fc ' : 'white';
const {intervals, value, freeTrainers, isAdmin} = this.props;
let valuetM = value.getTime();
const flag = Array.isArray(intervals) ? intervals.some(el => {
if(Array.isArray(el.intervals)){
for(let i = 0; i < el.intervals.length; i++){
if(value.getTime() >= el.intervals[i].start*1000 && value.getTime() < el.intervals[i].end * 1000)
return true
}
}
else{
if((valuetM >= el.start*1000) && valuetM < (el.end * 1000)) return true
}
}) : null | identifier_body |
TimeSlotGroup.js | izer'
import { elementType, dateFormat } from './utils/propTypes'
import EventSlot from './EventSlot';
import { DropTarget } from 'react-dnd';
import MasterListSlot from './MasterListSlot';
import moment from 'moment';
import Modal from '../../Modal/index';
import Button from "../../Button";
const squareTarget = {
| (props) {
props.transferTraining(props.value); // drag and drop
console.log('DROP props :', props);
//moveKnight(props.x, props.y);
},
// hover(props, monitor, component) {
// // This is fired very often and lets you perform side effects
// // in response to the hover. You can't handle enter and leave
// // here—if you need them, put monitor.isOver() into collect() so you
// // can just use componentDidUpdate() to handle enter/leave.
// }
};
function collect(connect, monitor){
return{
connectDropTarget: connect.dropTarget(),
hovered: monitor.isOver(),
item: monitor.getItem(),
}
}
class TimeSlotGroup extends Component {
static propTypes = {
dayWrapperComponent: elementType,
timeslots: PropTypes.number.isRequired,
step: PropTypes.number.isRequired,
value: PropTypes.instanceOf(Date).isRequired,
showLabels: PropTypes.bool,
isNow: PropTypes.bool,
slotPropGetter: PropTypes.func,
timeGutterFormat: dateFormat,
culture: PropTypes.string,
resource: PropTypes.string,
}
static defaultProps = {
intervals: [],
timeslots: 2,
step: 30,
isNow: false,
showLabels: false,
freeTrainers: null,
}
constructor(props) {
super(props);
this.state = {
modalWasTransfer: false,
modalTooLateTransfer: false,
}
};
showWasTransferModal = () => {
this.setState({modalWasTransfer: true});
}
showTooLateTransferModal = () => {
this.setState({modalTooLateTransfer: true});
}
renderSlice(slotNumber, content, value) {
const {
dayWrapperComponent,
showLabels,
isNow,
culture,
resource,
slotPropGetter,
showTransferEvent, //my
} = this.props
return (
<TimeSlot
key={slotNumber}
slotNumber={slotNumber}
slotPropGetter={slotPropGetter}
dayWrapperComponent={dayWrapperComponent}
showLabel={showLabels}
content={content}
culture={culture}
isNow={isNow}
resource={resource}
value={value}
showTransferEvent={showTransferEvent}
/>
)
}
renderSlices() {
const ret = []
const sliceLength = this.props.step
let sliceValue = this.props.value;
for (let i = 0; i < this.props.timeslots; i++) {
const content = localizer.format(
sliceValue,
'HH:mm',
this.props.culture
);
ret.push(this.renderSlice(i, content, sliceValue))
sliceValue = date.add(sliceValue, sliceLength, 'minutes')
}
return ret
}
renderEvent = () => {
let {
events,
showTransferEvent,
freeTrainers,
setChoosenTrainer,
showLabels,
handleDrop,
onCancelTraining,
trainerTraining,
mode,
onGotoPage,
isPushBtnTransfer,
} = this.props;
const valueTime = this.props.value.getTime()
for( let i = 0; i < events.length; i++){
if(events[i].start.getTime() === valueTime && showLabels) {
return (
<EventSlot
key={this.props.value.getTime()}
value={this.props.value.getTime()}
event={events[i]}
showTransferEvent={showTransferEvent}
setChoosenTrainer={this.props.setChoosenTrainer}
freeTrainers={freeTrainers}Б
idEvent={events[i].start.getTime()}
handleDrop={handleDrop}
setAbonement_Training = {this.props.setAbonement_Training}
onCancelTraining = {onCancelTraining}
mode = {mode}
onGotoPage = {onGotoPage}
isPushBtnTransfer = {isPushBtnTransfer}
deleteTraining = {this.props.deleteTraining}
deleteEventApiPatient={this.props.deleteEventApiPatient}
clickOnEvent={this.props.clickOnEvent}
selectIdEvent={this.props.selectIdEvent}
showTooLateTransferModal={this.showTooLateTransferModal}
showWasTransferModal={this.showWasTransferModal}
/>)
}
}
if(freeTrainers && freeTrainers.idEvent === this.props.value.getTime() && !showLabels){ // рендер выпадающего списка freeTrainer
return <EventSlot
key={this.props.value.getTime()}
value={this.props.value.getTime()}
showTransferEvent={showTransferEvent}
freeTrainers={freeTrainers}
setChoosenTrainer={this.props.setChoosenTrainer}
idEvent={freeTrainers.idEvent}
onGotoPage = {onGotoPage}
deleteEventApiPatient = {this.props.deleteEventApiPatient}
/>
}
return null;
}
renderMasterList = () => {
const {masterList, value, showMasterList} = this.props;
let freetrainers = [];
let busytrainers = [];
for(let elem in masterList){
if(elem === 'freetrainers') {
freetrainers = masterList[elem]
}
if(elem === 'busytrainers'){
busytrainers = masterList[elem]
}
}
if(freetrainers.length || busytrainers.length)
return (
<MasterListSlot
key={value.getTime()}
freetrainers = {freetrainers}
busytrainers = {busytrainers}
value = {value.getTime()}
showMasterList = {showMasterList}
/>
)
}
showModalTransferEvent = (idValue) => {
this.props.showModalTransferEvent(idValue);
}
render() {
//drag and drop
const { connectDropTarget, hovered, item} = this.props;
const backgroundColor= hovered ? '#e8f8fc ' : 'white';
const {intervals, value, freeTrainers, isAdmin} = this.props;
let valuetM = value.getTime();
const flag = Array.isArray(intervals) ? intervals.some(el => {
if(Array.isArray(el.intervals)){
for(let i = 0; i < el.intervals.length; i++){
if(value.getTime() >= el.intervals[i].start*1000 && value.getTime() < el.intervals[i].end * 1000)
return true
}
}
else{
if((valuetM >= el.start*1000) && valuetM < (el.end * 1000)) return true
}
}) : null
const isViewTrainer = (freeTrainers && freeTrainers.idEvent === this.props.value.getTime()) ? true : false;//не OK если таместь freeTrainers
const currentEvent = this.renderEvent();
let cellClass = cn('rbc-timeslot-group', flag && !isViewTrainer && !currentEvent ? 'rbc-timeslot-group-OK' : 'rbc-timeslot-group-NOT');
const modalTransferEvent = flag && !isViewTrainer && !currentEvent ? this.showModalTransferEvent : () => {}; // перенос тренировки
if(isAdmin) {
return (
<div className={cellClass} style={{backgroundColor}} onClick={(e) => modalTransferEvent(value.getTime())}>
{this.renderSlices()}
{this.renderMasterList()}
</div>
)
}
if(flag && !isViewTrainer && !currentEvent){
return connectDropTarget(
<div className={cellClass} style={{backgroundColor}} onClick={(e) => modalTransferEvent(value.getTime())}>
{this.renderSlices()}
{currentEvent}
</div>
)
}
return (
<div className={cellClass} style={{backgroundColor}} onClick={(e) => modalTransferEvent(value.getTime())}>
{this.renderSlices()}
{currentEvent}
<Modal
title='Сообщение'
visible={this.state.modalTooLateTransfer}
onCancel={() => this.setState({modalTooLateTransfer : false})}
width={360}
className="schedule-message-modal-wrapper"
>
<div className="schedule-message-modal-text">
Переносить тренировку можно только за 24 часа до тренировки
</div>
<div className="schedule-message-modal">
<div className="schedule-message-btn">
<Button btnText='Ок'
onClick= {() => {
this.setState({modalTooLateTransfer: false});
}}
type='yellow'/>
</div>
</div>
</Modal>
<Modal
title='Сообщение'
visible={this.state.modalWasTransfer}
onCancel={() => this.setState({modalWasTransfer : false})}
width={360}
className="schedule-message-modal-wrapper"
>
<div className="schedule-message-modal-text">
| drop | identifier_name |
TimeSlotGroup.js | izer'
import { elementType, dateFormat } from './utils/propTypes'
import EventSlot from './EventSlot';
import { DropTarget } from 'react-dnd';
import MasterListSlot from './MasterListSlot';
import moment from 'moment';
import Modal from '../../Modal/index';
import Button from "../../Button";
const squareTarget = {
drop(props) {
props.transferTraining(props.value); // drag and drop
console.log('DROP props :', props);
//moveKnight(props.x, props.y);
},
// hover(props, monitor, component) {
// // This is fired very often and lets you perform side effects
// // in response to the hover. You can't handle enter and leave
// // here—if you need them, put monitor.isOver() into collect() so you
// // can just use componentDidUpdate() to handle enter/leave.
// }
};
function collect(connect, monitor){
return{
connectDropTarget: connect.dropTarget(),
hovered: monitor.isOver(),
item: monitor.getItem(),
}
}
class TimeSlotGroup extends Component {
static propTypes = {
dayWrapperComponent: elementType,
timeslots: PropTypes.number.isRequired,
step: PropTypes.number.isRequired,
value: PropTypes.instanceOf(Date).isRequired,
showLabels: PropTypes.bool,
isNow: PropTypes.bool,
slotPropGetter: PropTypes.func,
timeGutterFormat: dateFormat,
culture: PropTypes.string,
resource: PropTypes.string,
}
static defaultProps = {
intervals: [],
timeslots: 2,
step: 30,
isNow: false,
showLabels: false,
freeTrainers: null,
}
constructor(props) {
super(props);
this.state = {
modalWasTransfer: false,
modalTooLateTransfer: false,
}
};
showWasTransferModal = () => {
this.setState({modalWasTransfer: true});
}
showTooLateTransferModal = () => {
this.setState({modalTooLateTransfer: true});
}
renderSlice(slotNumber, content, value) {
const {
dayWrapperComponent,
showLabels,
isNow,
culture,
resource,
slotPropGetter,
showTransferEvent, //my
} = this.props
return (
<TimeSlot
key={slotNumber}
slotNumber={slotNumber}
slotPropGetter={slotPropGetter}
dayWrapperComponent={dayWrapperComponent}
showLabel={showLabels}
content={content}
culture={culture}
isNow={isNow}
resource={resource}
value={value}
showTransferEvent={showTransferEvent}
/>
)
}
renderSlices() {
const ret = []
const sliceLength = this.props.step
let sliceValue = this.props.value;
for (let i = 0; i < this.props.timeslots; i++) {
const content = localizer.format(
sliceValue,
'HH:mm',
this.props.culture
);
ret.push(this.renderSlice(i, content, sliceValue))
sliceValue = date.add(sliceValue, sliceLength, 'minutes')
}
return ret
}
renderEvent = () => {
let {
events,
showTransferEvent,
freeTrainers,
setChoosenTrainer,
showLabels,
handleDrop,
onCancelTraining,
trainerTraining,
mode,
onGotoPage,
isPushBtnTransfer,
} = this.props;
const valueTime = this.props.value.getTime()
for( let i = 0; i < events.length; i++){
if(events[i].start.getTime() === valueTime && showLabels) {
return (
<EventSlot
key={this.props.value.getTime()}
value={this.props.value.getTime()}
event={events[i]}
showTransferEvent={showTransferEvent}
setChoosenTrainer={this.props.setChoosenTrainer}
freeTrainers={freeTrainers}Б
idEvent={events[i].start.getTime()}
handleDrop={handleDrop}
setAbonement_Training = {this.props.setAbonement_Training}
onCancelTraining = {onCancelTraining}
mode = {mode}
onGotoPage = {onGotoPage}
isPushBtnTransfer = {isPushBtnTransfer}
deleteTraining = {this.props.deleteTraining}
deleteEventApiPatient={this.props.deleteEventApiPatient}
clickOnEvent={this.props.clickOnEvent}
selectIdEvent={this.props.selectIdEvent}
showTooLateTransferModal={this.showTooLateTransferModal}
showWasTransferModal={this.showWasTransferModal}
/>)
}
}
if(freeTrainers && freeTrainers.idEvent === this.props.value.getTime() && !showLabels){ // рендер выпадающего списка freeTrainer
return <EventSlot
key={this.props.value.getTime()}
value={this.props.value.getTime()}
showTransferEvent={showTransferEvent}
freeTrainers={freeTrainers}
setChoosenTrainer={this.props.setChoosenTrainer}
idEvent={freeTrainers.idEvent}
onGotoPage = {onGotoPage}
deleteEventApiPatient = {this.props.deleteEventApiPatient}
/>
}
return null;
}
renderMasterList = () => {
const {masterList, value, showMasterList} = this.props;
let freetrainers = [];
let busytrainers = [];
for(let elem in masterList){
if(elem === 'freetrainers') {
freetrainers = masterList[elem]
}
if(elem === 'busytrainers'){
busytrainers = masterList[elem]
}
}
if(freetrainers.length || busytrainers.length)
return (
<MasterListSlot
key={value.getTime()}
freetrainers = {freetrainers}
busytrainers = {busytrainers}
value = {value.getTime()}
showMasterList = {showMasterList}
/>
)
}
showModalTransferEvent = (idValue) => {
this.props.showModalTransferEvent(idValue);
}
render() {
//drag and drop
const { connectDropTarget, hovered, item} = this.props;
const backgroundColor= hovered ? '#e8f8fc ' : 'white';
const {intervals, value, freeTrainers, isAdmin} = this.props;
let valuetM = value.getTime();
const flag = Array.isArray(intervals) ? intervals.some(el => {
if(Array.isArray(el.intervals)){
for(let i = 0; i < el.intervals.length; i++){
if(value.getTime() >= el.intervals[i].start*1000 && value.getTime() < el.intervals[i].end * 1000)
return true
}
}
else{
if((valuetM >= el.start*1000) && valuetM < (el.end * 1000)) return true
}
}) : null
const isViewTrainer = (freeTrainers && freeTrainers.idEvent === this.props.value.getTime()) ? true : false;//не OK если таместь freeTrainers
const currentEvent = this.renderEvent();
let cellClass = cn('rbc-timeslot-group', flag && !isViewTrainer && !currentEvent ? 'rbc-timeslot-group-OK' : 'rbc-timeslot-group-NOT');
const modalTransferEvent = flag && !isViewTrainer && !currentEvent ? this.showModalTransferEvent : () => {}; // перенос тренировки
if(isAdmin) {
return (
<div className={cellClass} style={{backgroundColor}} onClick={(e) => modalTransferEvent(value.getTime())}>
{this.renderSlices()}
{this.renderMasterList()}
</div>
)
} | return connectDropTarget(
<div className={cellClass} style={{backgroundColor}} onClick={(e) => modalTransferEvent(value.getTime())}>
{this.renderSlices()}
{currentEvent}
</div>
)
}
return (
<div className={cellClass} style={{backgroundColor}} onClick={(e) => modalTransferEvent(value.getTime())}>
{this.renderSlices()}
{currentEvent}
<Modal
title='Сообщение'
visible={this.state.modalTooLateTransfer}
onCancel={() => this.setState({modalTooLateTransfer : false})}
width={360}
className="schedule-message-modal-wrapper"
>
<div className="schedule-message-modal-text">
Переносить тренировку можно только за 24 часа до тренировки
</div>
<div className="schedule-message-modal">
<div className="schedule-message-btn">
<Button btnText='Ок'
onClick= {() => {
this.setState({modalTooLateTransfer: false});
}}
type='yellow'/>
</div>
</div>
</Modal>
<Modal
title='Сообщение'
visible={this.state.modalWasTransfer}
onCancel={() => this.setState({modalWasTransfer : false})}
width={360}
className="schedule-message-modal-wrapper"
>
<div className="schedule-message-modal-text |
if(flag && !isViewTrainer && !currentEvent){ | random_line_split |
semaphore.rs | [crate documentation](index.html) for usage.
// This implementation encodes state (the available counter, acquire queue, and cancel queue) into
// multiple atomic variables and linked lists. Concurrent acquires (and concurrent cancels) synchronize
// by pushing onto a stack with an atomic swap. Releases synchronize with other operations by attempting
// to acquire a lock. If the lock is successfully acquired, the release can proceed. Otherwise
// the lock is marked dirty to indicate that there is additional work for the lock owner to do.
pub struct Semaphore {
// The number of available permits or the back of the queue (without next edges).
pub(crate) acquire: Atomic<AcquireState>,
// A number of releasable permits, and the state of the current release lock.
pub(crate) release: Atomic<ReleaseState>,
// The front of the queue (with next edges).
pub(crate) front: UnsafeCell<*const Waiter>,
// The last node swapped from AcquireState (with next edges).
pub(crate) middle: UnsafeCell<*const Waiter>,
// A stack of nodes that are cancelling.
pub(crate) next_cancel: Atomic<*const Waiter>,
}
unsafe impl Sync for Semaphore {}
unsafe impl Send for Semaphore {}
impl UnwindSafe for Semaphore {}
impl RefUnwindSafe for Semaphore {}
impl Debug for Semaphore {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self.acquire.load(Relaxed) {
Available(available) => write!(f, "Semaphore::Ready({:?})", available)?,
Queued(_) => match self.release.load(Relaxed) {
Unlocked(available) => write!(f, "Semaphore::Blocked({:?})", available)?,
_ => write!(f, "Semaphore::Unknown")?,
},
};
Ok(())
}
}
impl Semaphore {
/// The maximum number of permits that can be made available. This is slightly smaller than
/// [`usize::MAX`]. If the number of available permits exceeds this number, it may poison the
/// semaphore.
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, SemaphoreGuard};
/// struct ReadWriteLock(Semaphore);
/// impl ReadWriteLock {
/// fn new() -> Self {
/// ReadWriteLock(Semaphore::new(Semaphore::MAX_AVAILABLE))
/// }
/// // Acquire one permit, allowing up to MAX_AVAILABLE concurrent readers.
/// async fn read(&self) -> SemaphoreGuard<'_> {
/// self.0.acquire(1).await.unwrap()
/// }
/// // The writer acquires all the permits, prevent any concurrent writers or readers. The
/// // first-in-first-out priority policy prevents writer starvation.
/// async fn write(&self) -> SemaphoreGuard<'_> {
/// self.0.acquire(Semaphore::MAX_AVAILABLE).await.unwrap()
/// }
/// }
/// ```
pub const MAX_AVAILABLE: usize = (1 << (size_of::<usize>() * 8 - 3)) - 1;
/// Create a new semaphore with an initial number of permits.
/// # Examples
/// ```
/// use async_weighted_semaphore::Semaphore;
/// let semaphore = Semaphore::new(1024);
/// ```
pub fn new(initial: usize) -> Self {
Semaphore {
acquire: Atomic::new(Available(Permits::new(initial))),
release: Atomic::new(Unlocked(Permits::new(0))),
front: UnsafeCell::new(null()),
middle: UnsafeCell::new(null()),
next_cancel: Atomic::new(null()),
}
}
/// Wait until there are no older pending calls to [acquire](#method.acquire) and at least `amount` permits available.
/// Then consume the requested permits and return a [`SemaphoreGuard`].
/// # Errors
/// Returns [`PoisonError`] is the semaphore is poisoned.
/// # Examples
/// ```
/// # use futures::executor::block_on;
/// # use std::future::Future;
/// use async_weighted_semaphore::Semaphore;
/// async fn limit_concurrency(semaphore: &Semaphore, future: impl Future<Output=()>) {
/// let guard = semaphore.acquire(1).await.unwrap();
/// future.await
/// }
/// ```
pub fn acquire(&self, amount: usize) -> AcquireFuture {
AcquireFuture(UnsafeCell::new(Waiter {
semaphore: self,
step: UnsafeCell::new(AcquireStep::Entering),
waker: unsafe { AtomicWaker::new() },
amount,
next: UnsafeCell::new(null()),
prev: UnsafeCell::new(null()),
next_cancel: UnsafeCell::new(null()),
}), PhantomData, PhantomPinned)
}
/// Like [acquire](#method.acquire), but fails if the call would block.
/// # Errors
/// * Returns [`TryAcquireError::Poisoned`] is the semaphore is poisoned.
/// * Returns [`TryAcquireError::WouldBlock`] if a call to `acquire` would have blocked. This can
/// occur if there are insufficient available permits or if there is another pending call to acquire.
/// # Examples
/// ```
/// # use futures::executor::block_on;
/// # use std::future::Future;
/// use async_weighted_semaphore::Semaphore;
/// async fn run_if_safe(semaphore: &Semaphore, future: impl Future<Output=()>) {
/// if semaphore.try_acquire(1).is_ok() {
/// future.await
/// }
/// }
/// ```
pub fn try_acquire(&self, amount: usize) -> Result<SemaphoreGuard, TryAcquireError> {
let mut current = self.acquire.load(Acquire);
loop {
match current {
Queued(_) => return Err(TryAcquireError::WouldBlock),
Available(available) => {
let available = available.into_usize().ok_or(TryAcquireError::Poisoned)?;
if available < amount {
return Err(TryAcquireError::WouldBlock);
}
if self.acquire.cmpxchg_weak_acqrel(&mut current, Available(Permits::new(available - amount))) |
}
}
}
}
/// Like [acquire](#method.acquire), but takes an [`Arc`] `<Semaphore>` and returns a guard that is `'static`, [`Send`] and [`Sync`].
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, PoisonError, SemaphoreGuardArc};
/// # use std::sync::Arc;
/// use async_channel::{Sender, SendError};
/// // Limit size of a producer-consumer queue
/// async fn send<T>(semaphore: &Arc<Semaphore>,
/// sender: &Sender<(SemaphoreGuardArc, T)>,
/// message: T
/// ) -> Result<(), SendError<T>>{
/// match semaphore.acquire_arc(1).await {
/// // A semaphore can be poisoned to prevent deadlock when a channel closes.
/// Err(PoisonError) => Err(SendError(message)),
/// Ok(guard) => match sender.send((guard, message)).await{
/// Err(SendError((guard, message))) => Err(SendError(message)),
/// Ok(()) => Ok(())
/// }
/// }
/// }
/// ```
pub fn acquire_arc(self: &Arc<Self>, amount: usize) -> AcquireFutureArc {
AcquireFutureArc {
arc: self.clone(),
inner: unsafe { mem::transmute::<AcquireFuture, AcquireFuture>(self.acquire(amount)) },
}
}
/// Like [try_acquire](#method.try_acquire), but takes an [`Arc`] `<Semaphore>`, and returns a guard that is `'static`,
/// [`Send`] and [`Sync`].
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, TryAcquireError, SemaphoreGuardArc};
/// # use std::sync::Arc;
/// use async_channel::{Sender, TrySendError};
/// // Limit size of a producer-consumer queue
/// async fn try_send<T>(semaphore: &Arc<Semaphore>,
/// sender: &Sender<(SemaphoreGuardArc, T)>,
/// message: T
/// ) -> Result<(), TrySendError<T>>{
/// match semaphore.try_acquire_arc(1) {
/// Err(TryAcquireError::WouldBlock) => Err(TrySendError::Full(message)),
/// // A semaphore can be poisoned to prevent deadlock when a channel closes.
/// Err(TryAcquireError::Poisoned) => Err(TrySendError::Closed(message)),
/// Ok(guard) => match sender.try_send((guard, message)) {
/// Err(TrySendError::Closed((guard, message))) => Err(TrySendError::Closed(message)),
/// Err(TrySendError::Full((guard, message))) => Err(TrySendError::Full(message)),
/// Ok(()) => Ok(())
/// }
/// }
/// }
/// ```
pub fn try_acquire_arc(self: &Arc<Self>, amount: usize) -> Result<SemaphoreGuardArc, TryAcquireError> {
let | {
return Ok(SemaphoreGuard::new(self, amount));
} | conditional_block |
semaphore.rs | [crate documentation](index.html) for usage.
// This implementation encodes state (the available counter, acquire queue, and cancel queue) into
// multiple atomic variables and linked lists. Concurrent acquires (and concurrent cancels) synchronize
// by pushing onto a stack with an atomic swap. Releases synchronize with other operations by attempting
// to acquire a lock. If the lock is successfully acquired, the release can proceed. Otherwise
// the lock is marked dirty to indicate that there is additional work for the lock owner to do.
pub struct Semaphore {
// The number of available permits or the back of the queue (without next edges).
pub(crate) acquire: Atomic<AcquireState>,
// A number of releasable permits, and the state of the current release lock.
pub(crate) release: Atomic<ReleaseState>,
// The front of the queue (with next edges).
pub(crate) front: UnsafeCell<*const Waiter>,
// The last node swapped from AcquireState (with next edges).
pub(crate) middle: UnsafeCell<*const Waiter>,
// A stack of nodes that are cancelling.
pub(crate) next_cancel: Atomic<*const Waiter>,
}
unsafe impl Sync for Semaphore {}
unsafe impl Send for Semaphore {}
impl UnwindSafe for Semaphore {}
impl RefUnwindSafe for Semaphore {}
impl Debug for Semaphore {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self.acquire.load(Relaxed) {
Available(available) => write!(f, "Semaphore::Ready({:?})", available)?,
Queued(_) => match self.release.load(Relaxed) {
Unlocked(available) => write!(f, "Semaphore::Blocked({:?})", available)?,
_ => write!(f, "Semaphore::Unknown")?,
},
};
Ok(())
}
}
impl Semaphore {
/// The maximum number of permits that can be made available. This is slightly smaller than
/// [`usize::MAX`]. If the number of available permits exceeds this number, it may poison the
/// semaphore.
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, SemaphoreGuard};
/// struct ReadWriteLock(Semaphore);
/// impl ReadWriteLock {
/// fn new() -> Self {
/// ReadWriteLock(Semaphore::new(Semaphore::MAX_AVAILABLE))
/// }
/// // Acquire one permit, allowing up to MAX_AVAILABLE concurrent readers.
/// async fn read(&self) -> SemaphoreGuard<'_> {
/// self.0.acquire(1).await.unwrap()
/// }
/// // The writer acquires all the permits, prevent any concurrent writers or readers. The
/// // first-in-first-out priority policy prevents writer starvation.
/// async fn write(&self) -> SemaphoreGuard<'_> {
/// self.0.acquire(Semaphore::MAX_AVAILABLE).await.unwrap()
/// }
/// }
/// ```
pub const MAX_AVAILABLE: usize = (1 << (size_of::<usize>() * 8 - 3)) - 1;
/// Create a new semaphore with an initial number of permits.
/// # Examples
/// ```
/// use async_weighted_semaphore::Semaphore;
/// let semaphore = Semaphore::new(1024);
/// ```
pub fn new(initial: usize) -> Self {
Semaphore {
acquire: Atomic::new(Available(Permits::new(initial))),
release: Atomic::new(Unlocked(Permits::new(0))),
front: UnsafeCell::new(null()),
middle: UnsafeCell::new(null()),
next_cancel: Atomic::new(null()),
}
}
/// Wait until there are no older pending calls to [acquire](#method.acquire) and at least `amount` permits available.
/// Then consume the requested permits and return a [`SemaphoreGuard`].
/// # Errors
/// Returns [`PoisonError`] is the semaphore is poisoned.
/// # Examples
/// ```
/// # use futures::executor::block_on;
/// # use std::future::Future;
/// use async_weighted_semaphore::Semaphore;
/// async fn limit_concurrency(semaphore: &Semaphore, future: impl Future<Output=()>) {
/// let guard = semaphore.acquire(1).await.unwrap();
/// future.await
/// }
/// ```
pub fn acquire(&self, amount: usize) -> AcquireFuture {
AcquireFuture(UnsafeCell::new(Waiter {
semaphore: self,
step: UnsafeCell::new(AcquireStep::Entering),
waker: unsafe { AtomicWaker::new() },
amount,
next: UnsafeCell::new(null()),
prev: UnsafeCell::new(null()),
next_cancel: UnsafeCell::new(null()),
}), PhantomData, PhantomPinned)
}
/// Like [acquire](#method.acquire), but fails if the call would block.
/// # Errors
/// * Returns [`TryAcquireError::Poisoned`] is the semaphore is poisoned.
/// * Returns [`TryAcquireError::WouldBlock`] if a call to `acquire` would have blocked. This can
/// occur if there are insufficient available permits or if there is another pending call to acquire.
/// # Examples
/// ```
/// # use futures::executor::block_on;
/// # use std::future::Future;
/// use async_weighted_semaphore::Semaphore;
/// async fn run_if_safe(semaphore: &Semaphore, future: impl Future<Output=()>) {
/// if semaphore.try_acquire(1).is_ok() {
/// future.await
/// }
/// }
/// ```
pub fn | (&self, amount: usize) -> Result<SemaphoreGuard, TryAcquireError> {
let mut current = self.acquire.load(Acquire);
loop {
match current {
Queued(_) => return Err(TryAcquireError::WouldBlock),
Available(available) => {
let available = available.into_usize().ok_or(TryAcquireError::Poisoned)?;
if available < amount {
return Err(TryAcquireError::WouldBlock);
}
if self.acquire.cmpxchg_weak_acqrel(&mut current, Available(Permits::new(available - amount))) {
return Ok(SemaphoreGuard::new(self, amount));
}
}
}
}
}
/// Like [acquire](#method.acquire), but takes an [`Arc`] `<Semaphore>` and returns a guard that is `'static`, [`Send`] and [`Sync`].
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, PoisonError, SemaphoreGuardArc};
/// # use std::sync::Arc;
/// use async_channel::{Sender, SendError};
/// // Limit size of a producer-consumer queue
/// async fn send<T>(semaphore: &Arc<Semaphore>,
/// sender: &Sender<(SemaphoreGuardArc, T)>,
/// message: T
/// ) -> Result<(), SendError<T>>{
/// match semaphore.acquire_arc(1).await {
/// // A semaphore can be poisoned to prevent deadlock when a channel closes.
/// Err(PoisonError) => Err(SendError(message)),
/// Ok(guard) => match sender.send((guard, message)).await{
/// Err(SendError((guard, message))) => Err(SendError(message)),
/// Ok(()) => Ok(())
/// }
/// }
/// }
/// ```
pub fn acquire_arc(self: &Arc<Self>, amount: usize) -> AcquireFutureArc {
AcquireFutureArc {
arc: self.clone(),
inner: unsafe { mem::transmute::<AcquireFuture, AcquireFuture>(self.acquire(amount)) },
}
}
/// Like [try_acquire](#method.try_acquire), but takes an [`Arc`] `<Semaphore>`, and returns a guard that is `'static`,
/// [`Send`] and [`Sync`].
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, TryAcquireError, SemaphoreGuardArc};
/// # use std::sync::Arc;
/// use async_channel::{Sender, TrySendError};
/// // Limit size of a producer-consumer queue
/// async fn try_send<T>(semaphore: &Arc<Semaphore>,
/// sender: &Sender<(SemaphoreGuardArc, T)>,
/// message: T
/// ) -> Result<(), TrySendError<T>>{
/// match semaphore.try_acquire_arc(1) {
/// Err(TryAcquireError::WouldBlock) => Err(TrySendError::Full(message)),
/// // A semaphore can be poisoned to prevent deadlock when a channel closes.
/// Err(TryAcquireError::Poisoned) => Err(TrySendError::Closed(message)),
/// Ok(guard) => match sender.try_send((guard, message)) {
/// Err(TrySendError::Closed((guard, message))) => Err(TrySendError::Closed(message)),
/// Err(TrySendError::Full((guard, message))) => Err(TrySendError::Full(message)),
/// Ok(()) => Ok(())
/// }
/// }
/// }
/// ```
pub fn try_acquire_arc(self: &Arc<Self>, amount: usize) -> Result<SemaphoreGuardArc, TryAcquireError> {
let guard | try_acquire | identifier_name |
semaphore.rs | [crate documentation](index.html) for usage.
// This implementation encodes state (the available counter, acquire queue, and cancel queue) into
// multiple atomic variables and linked lists. Concurrent acquires (and concurrent cancels) synchronize
// by pushing onto a stack with an atomic swap. Releases synchronize with other operations by attempting
// to acquire a lock. If the lock is successfully acquired, the release can proceed. Otherwise
// the lock is marked dirty to indicate that there is additional work for the lock owner to do.
pub struct Semaphore {
// The number of available permits or the back of the queue (without next edges).
pub(crate) acquire: Atomic<AcquireState>,
// A number of releasable permits, and the state of the current release lock.
pub(crate) release: Atomic<ReleaseState>,
// The front of the queue (with next edges).
pub(crate) front: UnsafeCell<*const Waiter>,
// The last node swapped from AcquireState (with next edges).
pub(crate) middle: UnsafeCell<*const Waiter>,
// A stack of nodes that are cancelling.
pub(crate) next_cancel: Atomic<*const Waiter>,
}
unsafe impl Sync for Semaphore {}
unsafe impl Send for Semaphore {}
impl UnwindSafe for Semaphore {}
impl RefUnwindSafe for Semaphore {}
impl Debug for Semaphore {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result |
}
impl Semaphore {
/// The maximum number of permits that can be made available. This is slightly smaller than
/// [`usize::MAX`]. If the number of available permits exceeds this number, it may poison the
/// semaphore.
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, SemaphoreGuard};
/// struct ReadWriteLock(Semaphore);
/// impl ReadWriteLock {
/// fn new() -> Self {
/// ReadWriteLock(Semaphore::new(Semaphore::MAX_AVAILABLE))
/// }
/// // Acquire one permit, allowing up to MAX_AVAILABLE concurrent readers.
/// async fn read(&self) -> SemaphoreGuard<'_> {
/// self.0.acquire(1).await.unwrap()
/// }
/// // The writer acquires all the permits, prevent any concurrent writers or readers. The
/// // first-in-first-out priority policy prevents writer starvation.
/// async fn write(&self) -> SemaphoreGuard<'_> {
/// self.0.acquire(Semaphore::MAX_AVAILABLE).await.unwrap()
/// }
/// }
/// ```
pub const MAX_AVAILABLE: usize = (1 << (size_of::<usize>() * 8 - 3)) - 1;
/// Create a new semaphore with an initial number of permits.
/// # Examples
/// ```
/// use async_weighted_semaphore::Semaphore;
/// let semaphore = Semaphore::new(1024);
/// ```
pub fn new(initial: usize) -> Self {
Semaphore {
acquire: Atomic::new(Available(Permits::new(initial))),
release: Atomic::new(Unlocked(Permits::new(0))),
front: UnsafeCell::new(null()),
middle: UnsafeCell::new(null()),
next_cancel: Atomic::new(null()),
}
}
/// Wait until there are no older pending calls to [acquire](#method.acquire) and at least `amount` permits available.
/// Then consume the requested permits and return a [`SemaphoreGuard`].
/// # Errors
/// Returns [`PoisonError`] is the semaphore is poisoned.
/// # Examples
/// ```
/// # use futures::executor::block_on;
/// # use std::future::Future;
/// use async_weighted_semaphore::Semaphore;
/// async fn limit_concurrency(semaphore: &Semaphore, future: impl Future<Output=()>) {
/// let guard = semaphore.acquire(1).await.unwrap();
/// future.await
/// }
/// ```
pub fn acquire(&self, amount: usize) -> AcquireFuture {
AcquireFuture(UnsafeCell::new(Waiter {
semaphore: self,
step: UnsafeCell::new(AcquireStep::Entering),
waker: unsafe { AtomicWaker::new() },
amount,
next: UnsafeCell::new(null()),
prev: UnsafeCell::new(null()),
next_cancel: UnsafeCell::new(null()),
}), PhantomData, PhantomPinned)
}
/// Like [acquire](#method.acquire), but fails if the call would block.
/// # Errors
/// * Returns [`TryAcquireError::Poisoned`] is the semaphore is poisoned.
/// * Returns [`TryAcquireError::WouldBlock`] if a call to `acquire` would have blocked. This can
/// occur if there are insufficient available permits or if there is another pending call to acquire.
/// # Examples
/// ```
/// # use futures::executor::block_on;
/// # use std::future::Future;
/// use async_weighted_semaphore::Semaphore;
/// async fn run_if_safe(semaphore: &Semaphore, future: impl Future<Output=()>) {
/// if semaphore.try_acquire(1).is_ok() {
/// future.await
/// }
/// }
/// ```
pub fn try_acquire(&self, amount: usize) -> Result<SemaphoreGuard, TryAcquireError> {
let mut current = self.acquire.load(Acquire);
loop {
match current {
Queued(_) => return Err(TryAcquireError::WouldBlock),
Available(available) => {
let available = available.into_usize().ok_or(TryAcquireError::Poisoned)?;
if available < amount {
return Err(TryAcquireError::WouldBlock);
}
if self.acquire.cmpxchg_weak_acqrel(&mut current, Available(Permits::new(available - amount))) {
return Ok(SemaphoreGuard::new(self, amount));
}
}
}
}
}
/// Like [acquire](#method.acquire), but takes an [`Arc`] `<Semaphore>` and returns a guard that is `'static`, [`Send`] and [`Sync`].
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, PoisonError, SemaphoreGuardArc};
/// # use std::sync::Arc;
/// use async_channel::{Sender, SendError};
/// // Limit size of a producer-consumer queue
/// async fn send<T>(semaphore: &Arc<Semaphore>,
/// sender: &Sender<(SemaphoreGuardArc, T)>,
/// message: T
/// ) -> Result<(), SendError<T>>{
/// match semaphore.acquire_arc(1).await {
/// // A semaphore can be poisoned to prevent deadlock when a channel closes.
/// Err(PoisonError) => Err(SendError(message)),
/// Ok(guard) => match sender.send((guard, message)).await{
/// Err(SendError((guard, message))) => Err(SendError(message)),
/// Ok(()) => Ok(())
/// }
/// }
/// }
/// ```
pub fn acquire_arc(self: &Arc<Self>, amount: usize) -> AcquireFutureArc {
AcquireFutureArc {
arc: self.clone(),
inner: unsafe { mem::transmute::<AcquireFuture, AcquireFuture>(self.acquire(amount)) },
}
}
/// Like [try_acquire](#method.try_acquire), but takes an [`Arc`] `<Semaphore>`, and returns a guard that is `'static`,
/// [`Send`] and [`Sync`].
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, TryAcquireError, SemaphoreGuardArc};
/// # use std::sync::Arc;
/// use async_channel::{Sender, TrySendError};
/// // Limit size of a producer-consumer queue
/// async fn try_send<T>(semaphore: &Arc<Semaphore>,
/// sender: &Sender<(SemaphoreGuardArc, T)>,
/// message: T
/// ) -> Result<(), TrySendError<T>>{
/// match semaphore.try_acquire_arc(1) {
/// Err(TryAcquireError::WouldBlock) => Err(TrySendError::Full(message)),
/// // A semaphore can be poisoned to prevent deadlock when a channel closes.
/// Err(TryAcquireError::Poisoned) => Err(TrySendError::Closed(message)),
/// Ok(guard) => match sender.try_send((guard, message)) {
/// Err(TrySendError::Closed((guard, message))) => Err(TrySendError::Closed(message)),
/// Err(TrySendError::Full((guard, message))) => Err(TrySendError::Full(message)),
/// Ok(()) => Ok(())
/// }
/// }
/// }
/// ```
pub fn try_acquire_arc(self: &Arc<Self>, amount: usize) -> Result<SemaphoreGuardArc, TryAcquireError> {
let | {
match self.acquire.load(Relaxed) {
Available(available) => write!(f, "Semaphore::Ready({:?})", available)?,
Queued(_) => match self.release.load(Relaxed) {
Unlocked(available) => write!(f, "Semaphore::Blocked({:?})", available)?,
_ => write!(f, "Semaphore::Unknown")?,
},
};
Ok(())
} | identifier_body |
semaphore.rs | [crate documentation](index.html) for usage.
// This implementation encodes state (the available counter, acquire queue, and cancel queue) into
// multiple atomic variables and linked lists. Concurrent acquires (and concurrent cancels) synchronize
// by pushing onto a stack with an atomic swap. Releases synchronize with other operations by attempting
// to acquire a lock. If the lock is successfully acquired, the release can proceed. Otherwise
// the lock is marked dirty to indicate that there is additional work for the lock owner to do.
pub struct Semaphore {
// The number of available permits or the back of the queue (without next edges).
pub(crate) acquire: Atomic<AcquireState>,
// A number of releasable permits, and the state of the current release lock.
pub(crate) release: Atomic<ReleaseState>,
// The front of the queue (with next edges).
pub(crate) front: UnsafeCell<*const Waiter>,
// The last node swapped from AcquireState (with next edges).
pub(crate) middle: UnsafeCell<*const Waiter>,
// A stack of nodes that are cancelling.
pub(crate) next_cancel: Atomic<*const Waiter>,
}
unsafe impl Sync for Semaphore {}
unsafe impl Send for Semaphore {}
impl UnwindSafe for Semaphore {}
impl RefUnwindSafe for Semaphore {}
impl Debug for Semaphore {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self.acquire.load(Relaxed) {
Available(available) => write!(f, "Semaphore::Ready({:?})", available)?,
Queued(_) => match self.release.load(Relaxed) {
Unlocked(available) => write!(f, "Semaphore::Blocked({:?})", available)?,
_ => write!(f, "Semaphore::Unknown")?,
},
};
Ok(())
}
}
impl Semaphore {
/// The maximum number of permits that can be made available. This is slightly smaller than
/// [`usize::MAX`]. If the number of available permits exceeds this number, it may poison the
/// semaphore.
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, SemaphoreGuard};
/// struct ReadWriteLock(Semaphore);
/// impl ReadWriteLock {
/// fn new() -> Self {
/// ReadWriteLock(Semaphore::new(Semaphore::MAX_AVAILABLE))
/// }
/// // Acquire one permit, allowing up to MAX_AVAILABLE concurrent readers.
/// async fn read(&self) -> SemaphoreGuard<'_> {
/// self.0.acquire(1).await.unwrap()
/// }
/// // The writer acquires all the permits, prevent any concurrent writers or readers. The
/// // first-in-first-out priority policy prevents writer starvation.
/// async fn write(&self) -> SemaphoreGuard<'_> {
/// self.0.acquire(Semaphore::MAX_AVAILABLE).await.unwrap()
/// }
/// }
/// ```
pub const MAX_AVAILABLE: usize = (1 << (size_of::<usize>() * 8 - 3)) - 1;
/// Create a new semaphore with an initial number of permits.
/// # Examples
/// ```
/// use async_weighted_semaphore::Semaphore;
/// let semaphore = Semaphore::new(1024);
/// ```
pub fn new(initial: usize) -> Self {
Semaphore {
acquire: Atomic::new(Available(Permits::new(initial))),
release: Atomic::new(Unlocked(Permits::new(0))),
front: UnsafeCell::new(null()),
middle: UnsafeCell::new(null()),
next_cancel: Atomic::new(null()),
}
}
/// Wait until there are no older pending calls to [acquire](#method.acquire) and at least `amount` permits available.
/// Then consume the requested permits and return a [`SemaphoreGuard`].
/// # Errors
/// Returns [`PoisonError`] is the semaphore is poisoned.
/// # Examples
/// ```
/// # use futures::executor::block_on;
/// # use std::future::Future;
/// use async_weighted_semaphore::Semaphore;
/// async fn limit_concurrency(semaphore: &Semaphore, future: impl Future<Output=()>) {
/// let guard = semaphore.acquire(1).await.unwrap();
/// future.await
/// }
/// ```
pub fn acquire(&self, amount: usize) -> AcquireFuture {
AcquireFuture(UnsafeCell::new(Waiter {
semaphore: self,
step: UnsafeCell::new(AcquireStep::Entering),
waker: unsafe { AtomicWaker::new() },
amount,
next: UnsafeCell::new(null()),
prev: UnsafeCell::new(null()),
next_cancel: UnsafeCell::new(null()),
}), PhantomData, PhantomPinned)
}
/// Like [acquire](#method.acquire), but fails if the call would block.
/// # Errors
/// * Returns [`TryAcquireError::Poisoned`] is the semaphore is poisoned.
/// * Returns [`TryAcquireError::WouldBlock`] if a call to `acquire` would have blocked. This can
/// occur if there are insufficient available permits or if there is another pending call to acquire.
/// # Examples
/// ```
/// # use futures::executor::block_on;
/// # use std::future::Future;
/// use async_weighted_semaphore::Semaphore;
/// async fn run_if_safe(semaphore: &Semaphore, future: impl Future<Output=()>) {
/// if semaphore.try_acquire(1).is_ok() {
/// future.await
/// }
/// }
/// ```
pub fn try_acquire(&self, amount: usize) -> Result<SemaphoreGuard, TryAcquireError> {
let mut current = self.acquire.load(Acquire);
loop {
match current {
Queued(_) => return Err(TryAcquireError::WouldBlock),
Available(available) => {
let available = available.into_usize().ok_or(TryAcquireError::Poisoned)?;
if available < amount {
return Err(TryAcquireError::WouldBlock);
}
if self.acquire.cmpxchg_weak_acqrel(&mut current, Available(Permits::new(available - amount))) {
return Ok(SemaphoreGuard::new(self, amount));
}
}
}
}
}
/// Like [acquire](#method.acquire), but takes an [`Arc`] `<Semaphore>` and returns a guard that is `'static`, [`Send`] and [`Sync`].
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, PoisonError, SemaphoreGuardArc};
/// # use std::sync::Arc;
/// use async_channel::{Sender, SendError};
/// // Limit size of a producer-consumer queue
/// async fn send<T>(semaphore: &Arc<Semaphore>,
/// sender: &Sender<(SemaphoreGuardArc, T)>,
/// message: T
/// ) -> Result<(), SendError<T>>{
/// match semaphore.acquire_arc(1).await {
/// // A semaphore can be poisoned to prevent deadlock when a channel closes.
/// Err(PoisonError) => Err(SendError(message)),
/// Ok(guard) => match sender.send((guard, message)).await{
/// Err(SendError((guard, message))) => Err(SendError(message)),
/// Ok(()) => Ok(())
/// }
/// }
/// } | pub fn acquire_arc(self: &Arc<Self>, amount: usize) -> AcquireFutureArc {
AcquireFutureArc {
arc: self.clone(),
inner: unsafe { mem::transmute::<AcquireFuture, AcquireFuture>(self.acquire(amount)) },
}
}
/// Like [try_acquire](#method.try_acquire), but takes an [`Arc`] `<Semaphore>`, and returns a guard that is `'static`,
/// [`Send`] and [`Sync`].
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, TryAcquireError, SemaphoreGuardArc};
/// # use std::sync::Arc;
/// use async_channel::{Sender, TrySendError};
/// // Limit size of a producer-consumer queue
/// async fn try_send<T>(semaphore: &Arc<Semaphore>,
/// sender: &Sender<(SemaphoreGuardArc, T)>,
/// message: T
/// ) -> Result<(), TrySendError<T>>{
/// match semaphore.try_acquire_arc(1) {
/// Err(TryAcquireError::WouldBlock) => Err(TrySendError::Full(message)),
/// // A semaphore can be poisoned to prevent deadlock when a channel closes.
/// Err(TryAcquireError::Poisoned) => Err(TrySendError::Closed(message)),
/// Ok(guard) => match sender.try_send((guard, message)) {
/// Err(TrySendError::Closed((guard, message))) => Err(TrySendError::Closed(message)),
/// Err(TrySendError::Full((guard, message))) => Err(TrySendError::Full(message)),
/// Ok(()) => Ok(())
/// }
/// }
/// }
/// ```
pub fn try_acquire_arc(self: &Arc<Self>, amount: usize) -> Result<SemaphoreGuardArc, TryAcquireError> {
let guard = | /// ``` | random_line_split |
14.rs | on until the last row, flqrgnkx-127.
// The output of a knot hash is traditionally represented by 32 hexadecimal digits; each of these digits correspond to 4 bits, for a total of 4 * 32 = 128 bits. To convert to bits, turn each hexadecimal digit to its equivalent binary value, high-bit first: 0 becomes 0000, 1 becomes 0001, e becomes 1110, f becomes 1111, and so on; a hash that begins with a0c2017... in hexadecimal would begin with 10100000110000100000000101110000... in binary.
// Continuing this process, the first 8 rows and columns for key flqrgnkx appear as follows, using # to denote used squares, and . to denote free ones:
// ##.#.#..-->
// .#.#.#.#
// ....#.#.
// #.#.##.#
// .##.#...
// ##..#..#
// .#...#..
// ##.#.##.-->
// | |
// V V
// In this example, 8108 squares are used across the entire 128x128 grid.
// Given your actual key string, how many squares are used?
// Your puzzle input is jxqlasbh.
#![feature(conservative_impl_trait)]
#![feature(entry_and_modify)]
// #![feature(nll)]
extern crate advent2017;
use advent2017::knot::{Knot};
use std::io::Cursor;
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Occupied, Vacant};
/// Given any Binary, return an iterator that iterates through the binary
/// representation of the type (msb first), and returns true whenever the bit is set.
fn num_to_bits<T: std::fmt::Binary>(num: T) -> Vec<bool> {
let s = format!("{:04b}", num);
s.chars()
.map(|c| c == '1')
.collect::<Vec<bool>>()
}
/// Given a string representing a hexadecimal number,
/// where each character of the string is a hexadecimal digit representing 4 binary bits,
/// return a bitfield of the unsigned binary representation of that number,
/// msb at index 0
fn hex_to_bits<'a>(hex: &'a str) -> Vec<bool> {
(0..hex.len())
.map(|i| &hex[i..i+1])
.map(|slice| u8::from_str_radix(slice, 16).unwrap())
.flat_map(|num| num_to_bits(num))
.collect::<Vec<bool>>()
}
fn hashes(seed: &str) -> Vec<String> {
(0..128)
.map(|i| format!("{}-{}", seed, i))
.map(|plaintext| {
let mut knot = Knot::new();
knot.hash(Cursor::new(plaintext))
})
.collect()
}
fn bitcount_hash(hash: &str) -> u32 {
let mut bitsum = 0;
for j in 0..32 {
let slice = &hash[j..j+1];
let num = u32::from_str_radix(slice, 16).unwrap();
bitsum += num.count_ones();
}
bitsum
}
fn count_hash_seed(s: &str) -> u32 {
let mut bitsum = 0;
for hash in hashes(&s) {
bitsum += bitcount_hash(&hash);
}
bitsum
}
fn part_one() {
let input = "jxqlasbh";
println!("{}: {}", input, count_hash_seed(&input));
}
// --- Part Two ---
// Now, all the defragmenter needs to know is the number of regions. A region is a group of used squares that are all adjacent, not including diagonals. Every used square is in exactly one region: lone used squares form their own isolated regions, while several adjacent squares all count as a single region.
// In the example above, the following nine regions are visible, each marked with a distinct digit:
// 11.2.3..-->
// .1.2.3.4
// ....5.6.
// 7.8.55.9
// .88.5...
// 88..5..8
// .8...8..
// 88.8.88.-->
// | |
// V V
// Of particular interest is the region marked 8; while it does not appear contiguous in this small view, all of the squares marked 8 are connected when considering the whole 128x128 grid. In total, in this example, 1242 regions are present.
// How many regions are present given your key string?
fn make_grid(hash_seed: &str) -> Vec<Vec<bool>> {
let mut grid = Vec::with_capacity(128);
for hash in hashes(hash_seed) {
grid.push(hex_to_bits(&hash));
}
grid
}
/// make a single scan through the grid
// At each position, if the cell is filled, look in each cardinal direction for adjacent clusters
// If at least one is found, merge this element and all clusters that it is touching into the
// cluster with the lowest id that was found.
// If none are found, then start a new cluster on this cell.
type ClusterId = i32;
#[derive(Debug)]
struct Loc(usize, usize);
type CGrid = Vec<Vec<CellState>>;
type CMap = HashMap<ClusterId, Vec<Loc>>;
#[derive(PartialEq, Eq, Debug, Clone)]
enum CellState {
Unclaimed,
Empty,
Id(ClusterId)
}
struct Clusters {
grid: CGrid,
index: CMap,
next_id: ClusterId
}
impl Clusters {
fn new(size: u32) -> Self {
let mut grid : Vec<Vec<CellState>> = Vec::new();
for _ in 0..size {
let mut row = vec![];
for _ in 0..size {
row.push(CellState::Unclaimed);
}
grid.push(row);
}
Clusters { grid, index: HashMap::new(), next_id: 0 }
}
fn print_small(&self, window_size: usize) {
for row in self.grid.iter().take(window_size) {
println!("\n{}", row.iter().take(window_size).map(|c| match c {
&CellState::Id(id) => format!("{:4}", id),
&CellState::Empty => " .".to_string(),
&CellState::Unclaimed => " ?".to_string()
})
.collect::<Vec<String>>()
.join(" "));
}
}
fn add_grid(&mut self, &Loc(i, j): &Loc, id: ClusterId) {
self.grid[i][j] = CellState::Id(id);
}
fn new_cluster(&mut self, loc: Loc) {
let id = self.next_id;
self.next_id += 1;
self.add_to_cluster(loc, id);
}
fn add_to_cluster(&mut self, loc: Loc, id: ClusterId) {
self.add_grid(&loc, id);
match self.index.entry(id) {
Occupied(mut e) => { e.get_mut().push(loc); }
Vacant(e) => { e.insert(vec![loc]); }
}
}
fn set_empty(&mut self, Loc(i, j): Loc) {
self.grid[i][j] = CellState::Empty;
}
fn state(&self, &Loc(i, j): &Loc) -> CellState {
self.grid[i][j].clone()
}
fn merge_clusters(&mut self, dest: ClusterId, other: &ClusterId) {
if dest == *other {
return;
}
if let Some(mut locs) = self.index.remove(&other) {
for loc in locs.iter() {
self.add_grid(&loc, dest);
}
self.index.entry(dest)
.and_modify(|f| f.append(&mut locs))
.or_insert_with(|| locs );
}
}
}
fn | (size: u32, occupied: &Vec<Vec<bool>>) {
for row in occupied.iter().take(size as usize) {
println!("\n{}", row.iter().take(size as usize).map(|c| match c {
&true => "#",
&false => ".",
})
.collect::<Vec<&str>>()
.join(" "));
}
}
/*
This algorithm makes one pass through the grid, left to right, top to bottom.
At each cell, if the cell is occupied, it checks all neighboring cells for any that
belong to a cluster. Then current cell and all of its cluster neighbors are merged into
the lowest-id cluster that it finds.
If the cell is occupied but has no neighbors that belong to cells, a new cluster is started.
*/
fn count_clusters(occupied: &Vec<Vec<bool>>) -> u32 {
let size = 128;
let mut clusters = Clusters::new(size);
let len = clusters | print_small_grid | identifier_name |
14.rs | how many squares are used?
// Your puzzle input is jxqlasbh.
#![feature(conservative_impl_trait)]
#![feature(entry_and_modify)]
// #![feature(nll)]
extern crate advent2017;
use advent2017::knot::{Knot};
use std::io::Cursor;
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Occupied, Vacant};
/// Given any Binary, return an iterator that iterates through the binary
/// representation of the type (msb first), and returns true whenever the bit is set.
fn num_to_bits<T: std::fmt::Binary>(num: T) -> Vec<bool> {
let s = format!("{:04b}", num);
s.chars()
.map(|c| c == '1')
.collect::<Vec<bool>>()
}
/// Given a string representing a hexadecimal number,
/// where each character of the string is a hexadecimal digit representing 4 binary bits,
/// return a bitfield of the unsigned binary representation of that number,
/// msb at index 0
fn hex_to_bits<'a>(hex: &'a str) -> Vec<bool> {
(0..hex.len())
.map(|i| &hex[i..i+1])
.map(|slice| u8::from_str_radix(slice, 16).unwrap())
.flat_map(|num| num_to_bits(num))
.collect::<Vec<bool>>()
}
fn hashes(seed: &str) -> Vec<String> {
(0..128)
.map(|i| format!("{}-{}", seed, i))
.map(|plaintext| {
let mut knot = Knot::new();
knot.hash(Cursor::new(plaintext))
})
.collect()
}
fn bitcount_hash(hash: &str) -> u32 {
let mut bitsum = 0;
for j in 0..32 {
let slice = &hash[j..j+1];
let num = u32::from_str_radix(slice, 16).unwrap();
bitsum += num.count_ones();
}
bitsum
}
fn count_hash_seed(s: &str) -> u32 {
let mut bitsum = 0;
for hash in hashes(&s) {
bitsum += bitcount_hash(&hash);
}
bitsum
}
fn part_one() {
let input = "jxqlasbh";
println!("{}: {}", input, count_hash_seed(&input));
}
// --- Part Two ---
// Now, all the defragmenter needs to know is the number of regions. A region is a group of used squares that are all adjacent, not including diagonals. Every used square is in exactly one region: lone used squares form their own isolated regions, while several adjacent squares all count as a single region.
// In the example above, the following nine regions are visible, each marked with a distinct digit:
// 11.2.3..-->
// .1.2.3.4
// ....5.6.
// 7.8.55.9
// .88.5...
// 88..5..8
// .8...8..
// 88.8.88.-->
// | |
// V V
// Of particular interest is the region marked 8; while it does not appear contiguous in this small view, all of the squares marked 8 are connected when considering the whole 128x128 grid. In total, in this example, 1242 regions are present.
// How many regions are present given your key string?
fn make_grid(hash_seed: &str) -> Vec<Vec<bool>> {
let mut grid = Vec::with_capacity(128);
for hash in hashes(hash_seed) {
grid.push(hex_to_bits(&hash));
}
grid
}
/// make a single scan through the grid
// At each position, if the cell is filled, look in each cardinal direction for adjacent clusters
// If at least one is found, merge this element and all clusters that it is touching into the
// cluster with the lowest id that was found.
// If none are found, then start a new cluster on this cell.
type ClusterId = i32;
#[derive(Debug)]
struct Loc(usize, usize);
type CGrid = Vec<Vec<CellState>>;
type CMap = HashMap<ClusterId, Vec<Loc>>;
#[derive(PartialEq, Eq, Debug, Clone)]
enum CellState {
Unclaimed,
Empty,
Id(ClusterId)
}
struct Clusters {
grid: CGrid,
index: CMap,
next_id: ClusterId
}
impl Clusters {
fn new(size: u32) -> Self {
let mut grid : Vec<Vec<CellState>> = Vec::new();
for _ in 0..size {
let mut row = vec![];
for _ in 0..size {
row.push(CellState::Unclaimed);
}
grid.push(row);
}
Clusters { grid, index: HashMap::new(), next_id: 0 }
}
fn print_small(&self, window_size: usize) {
for row in self.grid.iter().take(window_size) {
println!("\n{}", row.iter().take(window_size).map(|c| match c {
&CellState::Id(id) => format!("{:4}", id),
&CellState::Empty => " .".to_string(),
&CellState::Unclaimed => " ?".to_string()
})
.collect::<Vec<String>>()
.join(" "));
}
}
fn add_grid(&mut self, &Loc(i, j): &Loc, id: ClusterId) {
self.grid[i][j] = CellState::Id(id);
}
fn new_cluster(&mut self, loc: Loc) {
let id = self.next_id;
self.next_id += 1;
self.add_to_cluster(loc, id);
}
fn add_to_cluster(&mut self, loc: Loc, id: ClusterId) {
self.add_grid(&loc, id);
match self.index.entry(id) {
Occupied(mut e) => { e.get_mut().push(loc); }
Vacant(e) => { e.insert(vec![loc]); }
}
}
fn set_empty(&mut self, Loc(i, j): Loc) {
self.grid[i][j] = CellState::Empty;
}
fn state(&self, &Loc(i, j): &Loc) -> CellState {
self.grid[i][j].clone()
}
fn merge_clusters(&mut self, dest: ClusterId, other: &ClusterId) {
if dest == *other {
return;
}
if let Some(mut locs) = self.index.remove(&other) {
for loc in locs.iter() {
self.add_grid(&loc, dest);
}
self.index.entry(dest)
.and_modify(|f| f.append(&mut locs))
.or_insert_with(|| locs );
}
}
}
fn print_small_grid(size: u32, occupied: &Vec<Vec<bool>>) {
for row in occupied.iter().take(size as usize) {
println!("\n{}", row.iter().take(size as usize).map(|c| match c {
&true => "#",
&false => ".",
})
.collect::<Vec<&str>>()
.join(" "));
}
}
/*
This algorithm makes one pass through the grid, left to right, top to bottom.
At each cell, if the cell is occupied, it checks all neighboring cells for any that
belong to a cluster. Then current cell and all of its cluster neighbors are merged into
the lowest-id cluster that it finds.
If the cell is occupied but has no neighbors that belong to cells, a new cluster is started.
*/
fn count_clusters(occupied: &Vec<Vec<bool>>) -> u32 {
let size = 128;
let mut clusters = Clusters::new(size);
let len = clusters.grid.len();
// print_small_grid(10, &occupied);
for i in 0..len {
let jlen = clusters.grid[i].len();
for j in 0..jlen {
let val = clusters.state(&Loc(i, j));
if occupied[i][j] {
let mut adj_clusters = vec![];
for o in [-1, 1].iter() {
let it = (i as i64) + *o;
let jt = (j as i64) + *o;
if it >= 0 && it < len as i64 {
let loc = Loc(it as usize, j);
if let CellState::Id(id) = clusters.state(&loc) {
adj_clusters.push(id);
}
}
if jt >= 0 && jt < jlen as i64 {
let loc = Loc(i, jt as usize);
if let CellState::Id(id) = clusters.state(&loc) {
adj_clusters.push(id);
}
}
}
if adj_clusters.len() > 0 {
let min = adj_clusters.iter().clone().min().unwrap();
for id in adj_clusters.iter() {
clusters.merge_clusters(*min, &id);
}
clusters.add_to_cluster(Loc(i, j), *min);
} else | {
clusters.new_cluster(Loc(i, j));
} | conditional_block | |
14.rs | until the last row, flqrgnkx-127.
// The output of a knot hash is traditionally represented by 32 hexadecimal digits; each of these digits correspond to 4 bits, for a total of 4 * 32 = 128 bits. To convert to bits, turn each hexadecimal digit to its equivalent binary value, high-bit first: 0 becomes 0000, 1 becomes 0001, e becomes 1110, f becomes 1111, and so on; a hash that begins with a0c2017... in hexadecimal would begin with 10100000110000100000000101110000... in binary.
// Continuing this process, the first 8 rows and columns for key flqrgnkx appear as follows, using # to denote used squares, and . to denote free ones:
// ##.#.#..-->
// .#.#.#.#
// ....#.#.
// #.#.##.#
// .##.#...
// ##..#..#
// .#...#..
// ##.#.##.-->
// | |
// V V
// In this example, 8108 squares are used across the entire 128x128 grid.
// Given your actual key string, how many squares are used?
// Your puzzle input is jxqlasbh.
#![feature(conservative_impl_trait)]
#![feature(entry_and_modify)]
// #![feature(nll)]
extern crate advent2017;
use advent2017::knot::{Knot};
use std::io::Cursor;
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Occupied, Vacant};
/// Given any Binary, return an iterator that iterates through the binary
/// representation of the type (msb first), and returns true whenever the bit is set.
fn num_to_bits<T: std::fmt::Binary>(num: T) -> Vec<bool> {
let s = format!("{:04b}", num);
s.chars()
.map(|c| c == '1')
.collect::<Vec<bool>>()
}
/// Given a string representing a hexadecimal number,
/// where each character of the string is a hexadecimal digit representing 4 binary bits,
/// return a bitfield of the unsigned binary representation of that number,
/// msb at index 0
fn hex_to_bits<'a>(hex: &'a str) -> Vec<bool> {
(0..hex.len())
.map(|i| &hex[i..i+1])
.map(|slice| u8::from_str_radix(slice, 16).unwrap())
.flat_map(|num| num_to_bits(num))
.collect::<Vec<bool>>()
}
fn hashes(seed: &str) -> Vec<String> |
fn bitcount_hash(hash: &str) -> u32 {
let mut bitsum = 0;
for j in 0..32 {
let slice = &hash[j..j+1];
let num = u32::from_str_radix(slice, 16).unwrap();
bitsum += num.count_ones();
}
bitsum
}
fn count_hash_seed(s: &str) -> u32 {
let mut bitsum = 0;
for hash in hashes(&s) {
bitsum += bitcount_hash(&hash);
}
bitsum
}
fn part_one() {
let input = "jxqlasbh";
println!("{}: {}", input, count_hash_seed(&input));
}
// --- Part Two ---
// Now, all the defragmenter needs to know is the number of regions. A region is a group of used squares that are all adjacent, not including diagonals. Every used square is in exactly one region: lone used squares form their own isolated regions, while several adjacent squares all count as a single region.
// In the example above, the following nine regions are visible, each marked with a distinct digit:
// 11.2.3..-->
// .1.2.3.4
// ....5.6.
// 7.8.55.9
// .88.5...
// 88..5..8
// .8...8..
// 88.8.88.-->
// | |
// V V
// Of particular interest is the region marked 8; while it does not appear contiguous in this small view, all of the squares marked 8 are connected when considering the whole 128x128 grid. In total, in this example, 1242 regions are present.
// How many regions are present given your key string?
fn make_grid(hash_seed: &str) -> Vec<Vec<bool>> {
let mut grid = Vec::with_capacity(128);
for hash in hashes(hash_seed) {
grid.push(hex_to_bits(&hash));
}
grid
}
/// make a single scan through the grid
// At each position, if the cell is filled, look in each cardinal direction for adjacent clusters
// If at least one is found, merge this element and all clusters that it is touching into the
// cluster with the lowest id that was found.
// If none are found, then start a new cluster on this cell.
type ClusterId = i32;
#[derive(Debug)]
struct Loc(usize, usize);
type CGrid = Vec<Vec<CellState>>;
type CMap = HashMap<ClusterId, Vec<Loc>>;
#[derive(PartialEq, Eq, Debug, Clone)]
enum CellState {
Unclaimed,
Empty,
Id(ClusterId)
}
struct Clusters {
grid: CGrid,
index: CMap,
next_id: ClusterId
}
impl Clusters {
fn new(size: u32) -> Self {
let mut grid : Vec<Vec<CellState>> = Vec::new();
for _ in 0..size {
let mut row = vec![];
for _ in 0..size {
row.push(CellState::Unclaimed);
}
grid.push(row);
}
Clusters { grid, index: HashMap::new(), next_id: 0 }
}
fn print_small(&self, window_size: usize) {
for row in self.grid.iter().take(window_size) {
println!("\n{}", row.iter().take(window_size).map(|c| match c {
&CellState::Id(id) => format!("{:4}", id),
&CellState::Empty => " .".to_string(),
&CellState::Unclaimed => " ?".to_string()
})
.collect::<Vec<String>>()
.join(" "));
}
}
fn add_grid(&mut self, &Loc(i, j): &Loc, id: ClusterId) {
self.grid[i][j] = CellState::Id(id);
}
fn new_cluster(&mut self, loc: Loc) {
let id = self.next_id;
self.next_id += 1;
self.add_to_cluster(loc, id);
}
fn add_to_cluster(&mut self, loc: Loc, id: ClusterId) {
self.add_grid(&loc, id);
match self.index.entry(id) {
Occupied(mut e) => { e.get_mut().push(loc); }
Vacant(e) => { e.insert(vec![loc]); }
}
}
fn set_empty(&mut self, Loc(i, j): Loc) {
self.grid[i][j] = CellState::Empty;
}
fn state(&self, &Loc(i, j): &Loc) -> CellState {
self.grid[i][j].clone()
}
fn merge_clusters(&mut self, dest: ClusterId, other: &ClusterId) {
if dest == *other {
return;
}
if let Some(mut locs) = self.index.remove(&other) {
for loc in locs.iter() {
self.add_grid(&loc, dest);
}
self.index.entry(dest)
.and_modify(|f| f.append(&mut locs))
.or_insert_with(|| locs );
}
}
}
fn print_small_grid(size: u32, occupied: &Vec<Vec<bool>>) {
for row in occupied.iter().take(size as usize) {
println!("\n{}", row.iter().take(size as usize).map(|c| match c {
&true => "#",
&false => ".",
})
.collect::<Vec<&str>>()
.join(" "));
}
}
/*
This algorithm makes one pass through the grid, left to right, top to bottom.
At each cell, if the cell is occupied, it checks all neighboring cells for any that
belong to a cluster. Then current cell and all of its cluster neighbors are merged into
the lowest-id cluster that it finds.
If the cell is occupied but has no neighbors that belong to cells, a new cluster is started.
*/
fn count_clusters(occupied: &Vec<Vec<bool>>) -> u32 {
let size = 128;
let mut clusters = Clusters::new(size);
let len = clusters | {
(0..128)
.map(|i| format!("{}-{}", seed, i))
.map(|plaintext| {
let mut knot = Knot::new();
knot.hash(Cursor::new(plaintext))
})
.collect()
} | identifier_body |
14.rs | on until the last row, flqrgnkx-127.
// The output of a knot hash is traditionally represented by 32 hexadecimal digits; each of these digits correspond to 4 bits, for a total of 4 * 32 = 128 bits. To convert to bits, turn each hexadecimal digit to its equivalent binary value, high-bit first: 0 becomes 0000, 1 becomes 0001, e becomes 1110, f becomes 1111, and so on; a hash that begins with a0c2017... in hexadecimal would begin with 10100000110000100000000101110000... in binary.
// Continuing this process, the first 8 rows and columns for key flqrgnkx appear as follows, using # to denote used squares, and . to denote free ones:
// ##.#.#..-->
// .#.#.#.#
// ....#.#.
// #.#.##.#
// .##.#...
// ##..#..#
// .#...#..
// ##.#.##.-->
// | |
// V V
// In this example, 8108 squares are used across the entire 128x128 grid.
// Given your actual key string, how many squares are used?
// Your puzzle input is jxqlasbh.
#![feature(conservative_impl_trait)]
#![feature(entry_and_modify)]
// #![feature(nll)]
extern crate advent2017;
use advent2017::knot::{Knot};
use std::io::Cursor;
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Occupied, Vacant};
/// Given any Binary, return an iterator that iterates through the binary
/// representation of the type (msb first), and returns true whenever the bit is set.
fn num_to_bits<T: std::fmt::Binary>(num: T) -> Vec<bool> {
let s = format!("{:04b}", num);
s.chars()
.map(|c| c == '1')
.collect::<Vec<bool>>()
}
/// Given a string representing a hexadecimal number,
/// where each character of the string is a hexadecimal digit representing 4 binary bits,
/// return a bitfield of the unsigned binary representation of that number,
/// msb at index 0
fn hex_to_bits<'a>(hex: &'a str) -> Vec<bool> {
(0..hex.len())
.map(|i| &hex[i..i+1])
.map(|slice| u8::from_str_radix(slice, 16).unwrap())
.flat_map(|num| num_to_bits(num))
.collect::<Vec<bool>>()
}
fn hashes(seed: &str) -> Vec<String> {
(0..128)
.map(|i| format!("{}-{}", seed, i))
.map(|plaintext| {
let mut knot = Knot::new();
knot.hash(Cursor::new(plaintext))
})
.collect()
}
fn bitcount_hash(hash: &str) -> u32 {
let mut bitsum = 0;
for j in 0..32 {
let slice = &hash[j..j+1];
let num = u32::from_str_radix(slice, 16).unwrap();
bitsum += num.count_ones();
}
bitsum
}
fn count_hash_seed(s: &str) -> u32 {
let mut bitsum = 0;
for hash in hashes(&s) {
bitsum += bitcount_hash(&hash);
}
bitsum
}
fn part_one() {
let input = "jxqlasbh";
println!("{}: {}", input, count_hash_seed(&input));
}
// --- Part Two ---
// Now, all the defragmenter needs to know is the number of regions. A region is a group of used squares that are all adjacent, not including diagonals. Every used square is in exactly one region: lone used squares form their own isolated regions, while several adjacent squares all count as a single region.
// In the example above, the following nine regions are visible, each marked with a distinct digit:
// 11.2.3..-->
// .1.2.3.4
// ....5.6.
// 7.8.55.9
// .88.5...
// 88..5..8
// .8...8..
// 88.8.88.-->
// | |
// V V
// Of particular interest is the region marked 8; while it does not appear contiguous in this small view, all of the squares marked 8 are connected when considering the whole 128x128 grid. In total, in this example, 1242 regions are present.
// How many regions are present given your key string?
fn make_grid(hash_seed: &str) -> Vec<Vec<bool>> {
let mut grid = Vec::with_capacity(128);
for hash in hashes(hash_seed) {
grid.push(hex_to_bits(&hash));
}
grid
}
/// make a single scan through the grid
// At each position, if the cell is filled, look in each cardinal direction for adjacent clusters
// If at least one is found, merge this element and all clusters that it is touching into the
// cluster with the lowest id that was found.
// If none are found, then start a new cluster on this cell.
type ClusterId = i32;
#[derive(Debug)]
struct Loc(usize, usize);
type CGrid = Vec<Vec<CellState>>;
type CMap = HashMap<ClusterId, Vec<Loc>>;
#[derive(PartialEq, Eq, Debug, Clone)]
enum CellState {
Unclaimed,
Empty,
Id(ClusterId)
}
struct Clusters {
grid: CGrid,
index: CMap,
next_id: ClusterId
}
impl Clusters {
fn new(size: u32) -> Self {
let mut grid : Vec<Vec<CellState>> = Vec::new();
for _ in 0..size {
let mut row = vec![];
for _ in 0..size {
row.push(CellState::Unclaimed);
}
grid.push(row);
}
Clusters { grid, index: HashMap::new(), next_id: 0 }
}
fn print_small(&self, window_size: usize) {
for row in self.grid.iter().take(window_size) {
println!("\n{}", row.iter().take(window_size).map(|c| match c {
&CellState::Id(id) => format!("{:4}", id),
&CellState::Empty => " .".to_string(),
&CellState::Unclaimed => " ?".to_string()
})
.collect::<Vec<String>>()
.join(" "));
}
}
fn add_grid(&mut self, &Loc(i, j): &Loc, id: ClusterId) {
self.grid[i][j] = CellState::Id(id);
| }
fn new_cluster(&mut self, loc: Loc) {
let id = self.next_id;
self.next_id += 1;
self.add_to_cluster(loc, id);
}
fn add_to_cluster(&mut self, loc: Loc, id: ClusterId) {
self.add_grid(&loc, id);
match self.index.entry(id) {
Occupied(mut e) => { e.get_mut().push(loc); }
Vacant(e) => { e.insert(vec![loc]); }
}
}
fn set_empty(&mut self, Loc(i, j): Loc) {
self.grid[i][j] = CellState::Empty;
}
fn state(&self, &Loc(i, j): &Loc) -> CellState {
self.grid[i][j].clone()
}
fn merge_clusters(&mut self, dest: ClusterId, other: &ClusterId) {
if dest == *other {
return;
}
if let Some(mut locs) = self.index.remove(&other) {
for loc in locs.iter() {
self.add_grid(&loc, dest);
}
self.index.entry(dest)
.and_modify(|f| f.append(&mut locs))
.or_insert_with(|| locs );
}
}
}
fn print_small_grid(size: u32, occupied: &Vec<Vec<bool>>) {
for row in occupied.iter().take(size as usize) {
println!("\n{}", row.iter().take(size as usize).map(|c| match c {
&true => "#",
&false => ".",
})
.collect::<Vec<&str>>()
.join(" "));
}
}
/*
This algorithm makes one pass through the grid, left to right, top to bottom.
At each cell, if the cell is occupied, it checks all neighboring cells for any that
belong to a cluster. Then current cell and all of its cluster neighbors are merged into
the lowest-id cluster that it finds.
If the cell is occupied but has no neighbors that belong to cells, a new cluster is started.
*/
fn count_clusters(occupied: &Vec<Vec<bool>>) -> u32 {
let size = 128;
let mut clusters = Clusters::new(size);
let len = clusters | random_line_split | |
tsne2.js | = zeros(N * N); // allocate contiguous array
for (let i = 0; i < N; i++) {
for (let j = i + 1; j < N; j++) {
const d = computeVectorDistance(X[i], X[j]);
dist[(i * N) + j] = d;
dist[(j * N) + i] = d;
}
}
return dist;
}
// helper function
function sign(x) {
if (x > 0) {
return 1;
}
if (x < 0) {
return -1;
}
return 0;
}
/**
Data: набор данных X = {x1, x2, …, xn},
параметр функции потерь: перплексия Perp,
Параметры оптимизации: количество итераций T, скорость обучения η, момент α(t).
Result: представление данных Y(T) = {y1, y2, …, yn} (в 2D или 3D).
begin
вычислить попарное сходство pj|i c перплексией Perp (используя формулу 1)
установить pij = (pj|i + pi|j)/2n
инициализировать Y(0) = {y1, y2, …, yn} точками нормального распределения (mean=0, sd=1e-4)
for t = 1 to T do
вычислить сходство точек в пространстве отображения qij (по формуле 4)
вычислить градиент δCost/δy (по формуле 5)
установить Y(t) = Y(t-1) + ηδCost/δy + α(t)(Y(t-1) - Y(t-2))
end
end
**/
function init() {
// compute (p_{i|j} + p_{j|i})/(2n)
function d2p(pairwiseDistances_, perplexity, precision) {
const rootOfDistancesArrayLength = Math.sqrt(pairwiseDistances_.length); // this better be an integer
const distancesArrayLength = Math.floor(rootOfDistancesArrayLength);
assert(distancesArrayLength === rootOfDistancesArrayLength, 'D should have square number of elements.');
const entropyTarget = Math.log(perplexity); // target entropy of distribution
const probabilityMatrix = zeros(distancesArrayLength * distancesArrayLength); // temporary probability matrix
const prow = zeros(distancesArrayLength); // a temporary storage compartment
for (let i = 0; i < distancesArrayLength; i++) {
let betamin = -Infinity;
let betamax = Infinity;
let beta = 1; // initial value of precision
let done = false;
const maxtries = 50;
// perform binary search to find a suitable precision beta
// so that the entropy of the distribution is appropriate
let iteration = 0;
while (!done) {
// debugger;
// compute entropy and kernel row with beta precision
let psum = 0.0;
for (let j = 0; j < distancesArrayLength; j++) {
let probabilityJ = Math.exp(-pairwiseDistances_[i * distancesArrayLength + j] * beta);
if (i === j) { probabilityJ = 0; } // we dont care about diagonals
prow[j] = probabilityJ;
psum += probabilityJ;
}
// normalize p and compute entropy
let entropy = 0.0;
for (let j = 0; j < distancesArrayLength; j++) {
let probabilityJ;
if (psum === 0) {
probabilityJ = 0;
} else {
probabilityJ = prow[j] / psum;
}
prow[j] = probabilityJ;
if (probabilityJ > 1e-7) entropy -= probabilityJ * Math.log(probabilityJ);
}
// adjust beta based on result
if (entropy > entropyTarget) {
// entropy was too high (distribution too diffuse)
// so we need to increase the precision for more peaky distribution
betamin = beta; // move up the bounds
if (betamax === Infinity) {
beta *= 2;
} else {
beta = (beta + betamax) / 2;
}
} else {
// converse case. make distrubtion less peaky
betamax = beta;
if (betamin === -Infinity) {
beta /= 2;
} else {
beta = (beta + betamin) / 2;
}
}
// stopping conditions: too many tries or got a good precision
iteration++;
if (Math.abs(entropy - entropyTarget) < precision) {
done = true;
}
if (iteration >= maxtries) {
done = true;
}
}
// console.log('data point ' + i + ' gets precision ' + beta + ' after ' + num + ' binary search steps.');
// copy over the final prow to P at row i
for (let j = 0; j < distancesArrayLength; j++) {
probabilityMatrix[i * distancesArrayLength + j] = prow[j];
}
} // end loop over examples i
// symmetrize P and normalize it to sum to 1 over all ij
const Pout = zeros(distancesArrayLength * distancesArrayLength);
const N2 = distancesArrayLength * 2;
for (let i = 0; i < distancesArrayLength; i++) {
for (let j = 0; j < distancesArrayLength; j++) {
Pout[i * distancesArrayLength + j] = Math.max((probabilityMatrix[i * distancesArrayLength + j] + probabilityMatrix[j * distancesArrayLength + i]) / N2, 1e-100);
}
}
return Pout;
}
function tSNE(opt = {}) {
tSNE.perplexity = opt.perplexity || 30; // effective number of nearest neighbors
tSNE.dim = opt.dim || 2; // by default 2-D tSNE
tSNE.epsilon = opt.epsilon || 10; // learning rate
tSNE.iter = 0;
}
tSNE.prototype = {
// this function takes a set of high-dimensional points
// and creates matrix P from them using gaussian kernel
initDataRaw(X) {
const N = X.length;
const D = X[0].length;
assert(N > 0, ' X is empty? You must have some data!');
assert(D > 0, ' X[0] is empty? Where is the data?');
const pairwiseDistancesOfInput = pairwiseDistances(X); // convert X to distances using gaussian kernel
this.P = d2p(pairwiseDistancesOfInput, this.perplexity, 1e-4); // attach to object
this.N = N; // back up the size of the dataset
this.initSolution(); // refresh this
},
// this function takes a given distance matrix and creates
// matrix P from them.
// D is assumed to be provided as a list of lists, and should be symmetric
initDataDist(distancesMatrix) {
const N = distancesMatrix.length;
assert(N > 0, ' X is empty? You must have some data!');
// convert D to a (fast) typed array version
const convertedDistances = zeros(N * N); // allocate contiguous array
for (let i = 0; i < N; i++) {
for (let j = i + 1; j < N; j++) {
const d = distancesMatrix[i][j];
convertedDistances[i * N + j] = d;
convertedDistances[j * N + i] = d;
}
}
this.P = d2p(convertedDistances, this.perplexity, 1e-4);
this.N = N;
this.initSolution(); // refresh this
},
// (re)initializes the solution to random
initSolution() {
// generate random solution to t-SNE
this.solution = randn2d(this.N, this.dim); // the solution
this.gains = randn2d(this.N, this.dim, 1.0); // step gains to accelerate progress in unchanging directions
this.ystep = randn2d(this.N, this.dim, 0.0); // momentum accumulator
this.iter = 0;
},
// return pointer to curren | t solution
getSolution() {
return this.solution;
},
// perform a single step of optimization to improve the embedding
step() {
this.iter += 1;
const N = this.N;
const cg = this.costAndGradient(this.solution); // evaluate gradient
const cost = cg.cost;
const grad = cg.grad;
/ | identifier_body | |
tsne2.js | ;
}
// compute L2 distance between two vectors
function computeVectorDistance(x1, x2) {
const D = x1.length;
let d = 0;
for (let i = 0; i < D; i++) {
const x1i = x1[i];
const x2i = x2[i];
d += (x1i - x2i) * (x1i - x2i);
}
return d;
}
// compute pairwise distance in all vectors in X
function | (X) {
const N = X.length;
const dist = zeros(N * N); // allocate contiguous array
for (let i = 0; i < N; i++) {
for (let j = i + 1; j < N; j++) {
const d = computeVectorDistance(X[i], X[j]);
dist[(i * N) + j] = d;
dist[(j * N) + i] = d;
}
}
return dist;
}
// helper function
function sign(x) {
if (x > 0) {
return 1;
}
if (x < 0) {
return -1;
}
return 0;
}
/**
Data: набор данных X = {x1, x2, …, xn},
параметр функции потерь: перплексия Perp,
Параметры оптимизации: количество итераций T, скорость обучения η, момент α(t).
Result: представление данных Y(T) = {y1, y2, …, yn} (в 2D или 3D).
begin
вычислить попарное сходство pj|i c перплексией Perp (используя формулу 1)
установить pij = (pj|i + pi|j)/2n
инициализировать Y(0) = {y1, y2, …, yn} точками нормального распределения (mean=0, sd=1e-4)
for t = 1 to T do
вычислить сходство точек в пространстве отображения qij (по формуле 4)
вычислить градиент δCost/δy (по формуле 5)
установить Y(t) = Y(t-1) + ηδCost/δy + α(t)(Y(t-1) - Y(t-2))
end
end
**/
function init() {
// compute (p_{i|j} + p_{j|i})/(2n)
function d2p(pairwiseDistances_, perplexity, precision) {
const rootOfDistancesArrayLength = Math.sqrt(pairwiseDistances_.length); // this better be an integer
const distancesArrayLength = Math.floor(rootOfDistancesArrayLength);
assert(distancesArrayLength === rootOfDistancesArrayLength, 'D should have square number of elements.');
const entropyTarget = Math.log(perplexity); // target entropy of distribution
const probabilityMatrix = zeros(distancesArrayLength * distancesArrayLength); // temporary probability matrix
const prow = zeros(distancesArrayLength); // a temporary storage compartment
for (let i = 0; i < distancesArrayLength; i++) {
let betamin = -Infinity;
let betamax = Infinity;
let beta = 1; // initial value of precision
let done = false;
const maxtries = 50;
// perform binary search to find a suitable precision beta
// so that the entropy of the distribution is appropriate
let iteration = 0;
while (!done) {
// debugger;
// compute entropy and kernel row with beta precision
let psum = 0.0;
for (let j = 0; j < distancesArrayLength; j++) {
let probabilityJ = Math.exp(-pairwiseDistances_[i * distancesArrayLength + j] * beta);
if (i === j) { probabilityJ = 0; } // we dont care about diagonals
prow[j] = probabilityJ;
psum += probabilityJ;
}
// normalize p and compute entropy
let entropy = 0.0;
for (let j = 0; j < distancesArrayLength; j++) {
let probabilityJ;
if (psum === 0) {
probabilityJ = 0;
} else {
probabilityJ = prow[j] / psum;
}
prow[j] = probabilityJ;
if (probabilityJ > 1e-7) entropy -= probabilityJ * Math.log(probabilityJ);
}
// adjust beta based on result
if (entropy > entropyTarget) {
// entropy was too high (distribution too diffuse)
// so we need to increase the precision for more peaky distribution
betamin = beta; // move up the bounds
if (betamax === Infinity) {
beta *= 2;
} else {
beta = (beta + betamax) / 2;
}
} else {
// converse case. make distrubtion less peaky
betamax = beta;
if (betamin === -Infinity) {
beta /= 2;
} else {
beta = (beta + betamin) / 2;
}
}
// stopping conditions: too many tries or got a good precision
iteration++;
if (Math.abs(entropy - entropyTarget) < precision) {
done = true;
}
if (iteration >= maxtries) {
done = true;
}
}
// console.log('data point ' + i + ' gets precision ' + beta + ' after ' + num + ' binary search steps.');
// copy over the final prow to P at row i
for (let j = 0; j < distancesArrayLength; j++) {
probabilityMatrix[i * distancesArrayLength + j] = prow[j];
}
} // end loop over examples i
// symmetrize P and normalize it to sum to 1 over all ij
const Pout = zeros(distancesArrayLength * distancesArrayLength);
const N2 = distancesArrayLength * 2;
for (let i = 0; i < distancesArrayLength; i++) {
for (let j = 0; j < distancesArrayLength; j++) {
Pout[i * distancesArrayLength + j] = Math.max((probabilityMatrix[i * distancesArrayLength + j] + probabilityMatrix[j * distancesArrayLength + i]) / N2, 1e-100);
}
}
return Pout;
}
function tSNE(opt = {}) {
tSNE.perplexity = opt.perplexity || 30; // effective number of nearest neighbors
tSNE.dim = opt.dim || 2; // by default 2-D tSNE
tSNE.epsilon = opt.epsilon || 10; // learning rate
tSNE.iter = 0;
}
tSNE.prototype = {
// this function takes a set of high-dimensional points
// and creates matrix P from them using gaussian kernel
initDataRaw(X) {
const N = X.length;
const D = X[0].length;
assert(N > 0, ' X is empty? You must have some data!');
assert(D > 0, ' X[0] is empty? Where is the data?');
const pairwiseDistancesOfInput = pairwiseDistances(X); // convert X to distances using gaussian kernel
this.P = d2p(pairwiseDistancesOfInput, this.perplexity, 1e-4); // attach to object
this.N = N; // back up the size of the dataset
this.initSolution(); // refresh this
},
// this function takes a given distance matrix and creates
// matrix P from them.
// D is assumed to be provided as a list of lists, and should be symmetric
initDataDist(distancesMatrix) {
const N = distancesMatrix.length;
assert(N > 0, ' X is empty? You must have some data!');
// convert D to a (fast) typed array version
const convertedDistances = zeros(N * N); // allocate contiguous array
for (let i = 0; i < N; i++) {
for (let j = i + 1; j < N; j++) {
const d = distancesMatrix[i][j];
convertedDistances[i * N + j] = d;
convertedDistances[j * N + i] = d;
}
}
this.P = d2p(convertedDistances, this.perplexity, 1e-4);
this.N = N;
this.initSolution(); // refresh this
},
// (re)initializes the solution to random
initSolution() {
// generate random solution to t-SNE
this.solution = randn2d(this.N, this.dim); // the solution
this.gains = randn2d(this.N, this.dim, 1.0); // step gains to accelerate progress in unchanging directions | pairwiseDistances | identifier_name |
tsne2.js |
// utilitity that creates contiguous vector of zeros of size n
function zeros(n) {
if (typeof (n) === 'undefined' || isNaN(n)) { return []; }
if (typeof ArrayBuffer === 'undefined') {
// lacking browser support
const arr = new Array(n);
for (let i = 0; i < n; i++) { arr[i] = 0; }
return arr;
}
return new Float64Array(n); // typed arrays are faster
}
// return 0 mean unit standard deviation random number
let returnCache = false;
let gaussCache = 0.0;
function gaussRandom() {
if (returnCache) {
returnCache = false;
return gaussCache;
}
const u = (2 * Math.random()) - 1;
const v = (2 * Math.random()) - 1;
const r = (u * u) + (v * v);
if (r === 0 || r > 1) return gaussRandom();
const c = Math.sqrt((-2 * Math.log(r)) / r);
gaussCache = v * c; // cache this for next function call for efficiency
returnCache = true;
return u * c;
}
// return random normal number
function randn(mu, std) {
return mu + (gaussRandom() * std);
}
// utility that returns 2d array filled with random numbers
// or with value s, if provided
function randn2d(n, d, s) {
const uses = typeof s !== 'undefined';
const x = [];
for (let i = 0; i < n; i++) {
const xhere = [];
for (let j = 0; j < d; j++) {
if (uses) {
xhere.push(s);
} else {
xhere.push(randn(0.0, 1e-4));
}
}
x.push(xhere);
}
return x;
}
// compute L2 distance between two vectors
function computeVectorDistance(x1, x2) {
const D = x1.length;
let d = 0;
for (let i = 0; i < D; i++) {
const x1i = x1[i];
const x2i = x2[i];
d += (x1i - x2i) * (x1i - x2i);
}
return d;
}
// compute pairwise distance in all vectors in X
function pairwiseDistances(X) {
const N = X.length;
const dist = zeros(N * N); // allocate contiguous array
for (let i = 0; i < N; i++) {
for (let j = i + 1; j < N; j++) {
const d = computeVectorDistance(X[i], X[j]);
dist[(i * N) + j] = d;
dist[(j * N) + i] = d;
}
}
return dist;
}
// helper function
function sign(x) {
if (x > 0) {
return 1;
}
if (x < 0) {
return -1;
}
return 0;
}
/**
Data: набор данных X = {x1, x2, …, xn},
параметр функции потерь: перплексия Perp,
Параметры оптимизации: количество итераций T, скорость обучения η, момент α(t).
Result: представление данных Y(T) = {y1, y2, …, yn} (в 2D или 3D).
begin
вычислить попарное сходство pj|i c перплексией Perp (используя формулу 1)
установить pij = (pj|i + pi|j)/2n
инициализировать Y(0) = {y1, y2, …, yn} точками нормального распределения (mean=0, sd=1e-4)
for t = 1 to T do
вычислить сходство точек в пространстве отображения qij (по формуле 4)
вычислить градиент δCost/δy (по формуле 5)
установить Y(t) = Y(t-1) + ηδCost/δy + α(t)(Y(t-1) - Y(t-2))
end
end
**/
function init() {
// compute (p_{i|j} + p_{j|i})/(2n)
function d2p(pairwiseDistances_, perplexity, precision) {
const rootOfDistancesArrayLength = Math.sqrt(pairwiseDistances_.length); // this better be an integer
const distancesArrayLength = Math.floor(rootOfDistancesArrayLength);
assert(distancesArrayLength === rootOfDistancesArrayLength, 'D should have square number of elements.');
const entropyTarget = Math.log(perplexity); // target entropy of distribution
const probabilityMatrix = zeros(distancesArrayLength * distancesArrayLength); // temporary probability matrix
const prow = zeros(distancesArrayLength); // a temporary storage compartment
for (let i = 0; i < distancesArrayLength; i++) {
let betamin = -Infinity;
let betamax = Infinity;
let beta = 1; // initial value of precision
let done = false;
const maxtries = 50;
// perform binary search to find a suitable precision beta
// so that the entropy of the distribution is appropriate
let iteration = 0;
while (!done) {
// debugger;
// compute entropy and kernel row with beta precision
let psum = 0.0;
for (let j = 0; j < distancesArrayLength; j++) {
let probabilityJ = Math.exp(-pairwiseDistances_[i * distancesArrayLength + j] * beta);
if (i === j) { probabilityJ = 0; } // we dont care about diagonals
prow[j] = probabilityJ;
psum += probabilityJ;
}
// normalize p and compute entropy
let entropy = 0.0;
for (let j = 0; j < distancesArrayLength; j++) {
let probabilityJ;
if (psum === 0) {
probabilityJ = 0;
} else {
probabilityJ = prow[j] / psum;
}
prow[j] = probabilityJ;
if (probabilityJ > 1e-7) entropy -= probabilityJ * Math.log(probabilityJ);
}
// adjust beta based on result
if (entropy > entropyTarget) {
// entropy was too high (distribution too diffuse)
// so we need to increase the precision for more peaky distribution
betamin = beta; // move up the bounds
if (betamax === Infinity) {
beta *= 2;
} else {
beta = (beta + betamax) / 2;
}
} else {
// converse case. make distrubtion less peaky
betamax = beta;
if (betamin === -Infinity) {
beta /= 2;
} else {
beta = (beta + betamin) / 2;
}
}
// stopping conditions: too many tries or got a good precision
iteration++;
if (Math.abs(entropy - entropyTarget) < precision) {
done = true;
}
if (iteration >= maxtries) {
done = true;
}
}
// console.log('data point ' + i + ' gets precision ' + beta + ' after ' + num + ' binary search steps.');
// copy over the final prow to P at row i
for (let j = 0; j < distancesArrayLength; j++) {
probabilityMatrix[i * distancesArrayLength + j] = prow[j];
}
} // end loop over examples i
// symmetrize P and normalize it to sum to 1 over all ij
const Pout = zeros(distancesArrayLength * distancesArrayLength);
const N2 = distancesArrayLength * 2;
for (let i = 0; i < distancesArrayLength; i++) {
for (let j = 0; j < distancesArrayLength; j++) {
Pout[i * distancesArrayLength + j] = Math.max((probabilityMatrix[i * distancesArrayLength + j] + probabilityMatrix[j * distancesArrayLength + i]) / N2, 1e-100);
}
}
return Pout;
}
function tSNE(opt = {}) {
tSNE.perplexity = opt.perplexity || 30; // effective number of nearest neighbors
tSNE.dim = opt.dim || 2; // by default 2-D tSNE
tSNE.epsilon = opt.epsilon || 10; // learning rate
tSNE.iter = 0;
}
tSNE.prototype = {
// this function takes a set of high-dimensional points
| const tsnejs = window.tsnejs || { REVISION: 'ALPHA' };
function assert(condition, message) {
if (!condition) { throw message || 'Assertion failed'; }
} | random_line_split | |
tsne2.js |
}
// utilitity that creates contiguous vector of zeros of size n
function zeros(n) {
if (typeof (n) === 'undefined' || isNaN(n)) { return []; }
if (typeof ArrayBuffer === 'undefined') {
// lacking browser support
const arr = new Array(n);
for (let i = 0; i < n; i++) { arr[i] = 0; }
return arr;
}
return new Float64Array(n); // typed arrays are faster
}
// return 0 mean unit standard deviation random number
let returnCache = false;
let gaussCache = 0.0;
function gaussRandom() {
if (returnCache) {
returnCache = false;
return gaussCache;
}
const u = (2 * Math.random()) - 1;
const v = (2 * Math.random()) - 1;
const r = (u * u) + (v * v);
if (r === 0 || r > 1) return gaussRandom();
const c = Math.sqrt((-2 * Math.log(r)) / r);
gaussCache = v * c; // cache this for next function call for efficiency
returnCache = true;
return u * c;
}
// return random normal number
function randn(mu, std) {
return mu + (gaussRandom() * std);
}
// utility that returns 2d array filled with random numbers
// or with value s, if provided
function randn2d(n, d, s) {
const uses = typeof s !== 'undefined';
const x = [];
for (let i = 0; i < n; i++) {
const xhere = [];
for (let j = 0; j < d; j++) {
if (uses) {
xhere.push(s);
} else {
xhere.push(randn(0.0, 1e-4));
}
}
x.push(xhere);
}
return x;
}
// compute L2 distance between two vectors
function computeVectorDistance(x1, x2) {
const D = x1.length;
let d = 0;
for (let i = 0; i < D; i++) {
const x1i = x1[i];
const x2i = x2[i];
d += (x1i - x2i) * (x1i - x2i);
}
return d;
}
// compute pairwise distance in all vectors in X
function pairwiseDistances(X) {
const N = X.length;
const dist = zeros(N * N); // allocate contiguous array
for (let i = 0; i < N; i++) {
for (let j = i + 1; j < N; j++) {
const d = computeVectorDistance(X[i], X[j]);
dist[(i * N) + j] = d;
dist[(j * N) + i] = d;
}
}
return dist;
}
// helper function
function sign(x) {
if (x > 0) {
return 1;
}
if (x < 0) {
return -1;
}
return 0;
}
/**
Data: набор данных X = {x1, x2, …, xn},
параметр функции потерь: перплексия Perp,
Параметры оптимизации: количество итераций T, скорость обучения η, момент α(t).
Result: представление данных Y(T) = {y1, y2, …, yn} (в 2D или 3D).
begin
вычислить попарное сходство pj|i c перплексией Perp (используя формулу 1)
установить pij = (pj|i + pi|j)/2n
инициализировать Y(0) = {y1, y2, …, yn} точками нормального распределения (mean=0, sd=1e-4)
for t = 1 to T do
вычислить сходство точек в пространстве отображения qij (по формуле 4)
вычислить градиент δCost/δy (по формуле 5)
установить Y(t) = Y(t-1) + ηδCost/δy + α(t)(Y(t-1) - Y(t-2))
end
end
**/
function init() {
// compute (p_{i|j} + p_{j|i})/(2n)
function d2p(pairwiseDistances_, perplexity, precision) {
const rootOfDistancesArrayLength = Math.sqrt(pairwiseDistances_.length); // this better be an integer
const distancesArrayLength = Math.floor(rootOfDistancesArrayLength);
assert(distancesArrayLength === rootOfDistancesArrayLength, 'D should have square number of elements.');
const entropyTarget = Math.log(perplexity); // target entropy of distribution
const probabilityMatrix = zeros(distancesArrayLength * distancesArrayLength); // temporary probability matrix
const prow = zeros(distancesArrayLength); // a temporary storage compartment
for (let i = 0; i < distancesArrayLength; i++) {
let betamin = -Infinity;
let betamax = Infinity;
let beta = 1; // initial value of precision
let done = false;
const maxtries = 50;
// perform binary search to find a suitable precision beta
// so that the entropy of the distribution is appropriate
let iteration = 0;
while (!done) {
// debugger;
// compute entropy and kernel row with beta precision
let psum = 0.0;
for (let j = 0; j < distancesArrayLength; j++) {
let probabilityJ = Math.exp(-pairwiseDistances_[i * distancesArrayLength + j] * beta);
if (i === j) { probabilityJ = 0; } // we dont care about diagonals
prow[j] = probabilityJ;
psum += probabilityJ;
}
// normalize p and compute entropy
let entropy = 0.0;
for (let j = 0; j < distancesArrayLength; j++) {
let probabilityJ;
if (psum === 0) {
probabilityJ = 0;
} else {
probabilityJ = prow[j] / psum;
}
prow[j] = probabilityJ;
if (probabilityJ > 1e-7) entropy -= probabilityJ * Math.log(probabilityJ);
}
// adjust beta based on result
if (entropy > entropyTarget) {
// entropy was too high (distribution too diffuse)
// so we need to increase the precision for more peaky distribution
betamin = beta; // move up the bounds
if (betamax === Infinity) {
beta *= 2;
} else {
beta = (beta + betamax) / 2;
}
} else {
// converse case. make distrubtion less peaky
betamax = beta;
if (betamin === -Infinity) {
beta /= 2;
} else {
beta = (beta + betamin) / 2;
}
}
// stopping conditions: too many tries or got a good precision
iteration++;
if (Math.abs(entropy - entropyTarget) < precision) {
done = true;
}
if (iteration >= maxtries) {
done = true;
}
}
// console.log('data point ' + i + ' gets precision ' + beta + ' after ' + num + ' binary search steps.');
// copy over the final prow to P at row i
for (let j = 0; j < distancesArrayLength; j++) {
probabilityMatrix[i * distancesArrayLength + j] = prow[j];
}
} // end loop over examples i
// symmetrize P and normalize it to sum to 1 over all ij
const Pout = zeros(distancesArrayLength * distancesArrayLength);
const N2 = distancesArrayLength * 2;
for (let i = 0; i < distancesArrayLength; i++) {
for (let j = 0; j < distancesArrayLength; j++) {
Pout[i * distancesArrayLength + j] = Math.max((probabilityMatrix[i * distancesArrayLength + j] + probabilityMatrix[j * distancesArrayLength + i]) / N2, 1e-100);
}
}
return Pout;
}
function tSNE(opt = {}) {
tSNE.perplexity = opt.perplexity || 30; // effective number of nearest neighbors
tSNE.dim = opt.dim || 2; // by default 2-D tSNE
tSNE.epsilon = opt.epsilon || 10; // learning rate
tSNE.iter = 0;
}
tSNE.prototype = {
// this function takes a set of high-dimensional points
// and creates matrix P from them using gaussian kernel
initDataRaw(X) {
const N = X.length;
const D = X | { throw message || 'Assertion failed'; } | conditional_block | |
fiUnam.py | es un inode ya que estamos
guardando el nombre del archivo en él y eso no pasa en los verdaderos
inodes y obviamente tampoco estamos guardando
permisos ni propietarios porque NO los tenemos
"""
offset_fname = 15
offset_fsize = 8
offset_fcluster = 5
offset_fcreated = 14
offset_fmodif = 14
fname = "" # 0-15
fsize = 0 # 16-24
finit_cluster = 0 # 25-30
fcreated = "" # 31-45
fmodif = "" # 46-60
numdir = -1 # numero entre 0-63
# por las especificaciones
def __init__(self, dir_entry):
self.fname = dir_entry[0:15].decode('utf-8').lstrip()
self.fsize = int(dir_entry[16:24].decode('utf-8'))
self.finit_cluster = int(dir_entry[25:30].decode('utf-8'))
self.fcreated = dir_entry[31:45].decode('utf-8')
self.fmodif = dir_entry[46:60].decode('utf-8')
class FIFS:
f = open('fiunamfs.img','a+b')
fs_map = mmap.mmap(f.fileno(),0,access=mmap.ACCESS_WRITE)
sb = SuperBlock()
dentry_notused ='Xx.xXx.xXx.xXx.'
# Función interna
def inodes(self):
# usamos del 1-4 clusters, es decir 2048*4 = 4096
# las entradas miden 64 por lo tanto 4096/64 = 128, entonces el rango
# del for 0-128
inodes = []
for j in range(0,128):
# El directorio se encuentra en los cluster de 1-4 y cada cluster
# mide 2048 por lo tanto debemo ir en 2048, el cluser 0 es el
# superblock
prtb = self.sb.size_cluster + j*self.sb.size_dentry
i = DIRENTRY(self.fs_map[prtb:prtb + self.sb.size_dentry])
if self.dentry_notused != i.fname:
i.numdir = j
inodes.append(i)
return inodes
def search(self,fe):
for j in range(0,128):
prtb = self.sb.size_cluster + j*self.sb.size_dentry
i = DIRENTRY(self.fs_map[prtb:prtb + self.sb.size_dentry])
if fe == i.fname:
i.numdir = j
return i
return None
def registerFile(self,fe,cluster):
for j in range(0,128):
prtb = self.sb.size_cluster + j*self.sb.size_dentry
i = DIRENTRY(self.fs_map[prtb:prtb + self.sb.size_dentry])
if self.dentry_notused == i.fname:
# tener cuidado con longitud de nombres
spaces = i.offset_fname - len(fe)
self.fs_map[prtb:prtb + i.offset_fname] = bytes(fe.rjust(len(fe)+spaces)).encode('utf-8')
fe_size = str(os.stat(fe).st_size)
size_zeros = i.offset_fsize - len(fe_size)
new_ptrb = prtb + i.offset_fname + 1
self.fs_map[new_ptrb :new_ptrb + i.offset_fsize] = bytes(fe_size.zfill(len(fe_size)+size_zeros)).encode('utf-8')
fe_cluster = str(cluster)
cluster_zeros = i.offset_fcluster - len(fe_cluster)
new_ptrb += i.offset_fsize + 1
self.fs_map[new_ptrb:new_ptrb + i.offset_fcluster] = bytes(fe_cluster.zfill(len(fe_cluster)+cluster_zeros)).encode('utf-8')
fe_date_create= time.strftime('%Y%m%d%H%M%S', time.gmtime(os.path.getctime(fe)))
new_ptrb += i.offset_fcluster + 1
self.fs_map[new_ptrb:new_ptrb + i.offset_fcreated] = bytes(fe_date_create).encode('utf-8')
fe_date_modif=time.strftime('%Y%m%d%H%M%S', time.gmtime(os.path.getmtime(fe)))
new_ptrb += i.offset_fcreated+ 1
self.fs_map[new_ptrb:new_ptrb + i.offset_fmodif] = bytes(fe_date_modif).encode('utf-8')
break
def cpint(self,inode,clustdest):
ptrb=self.sb.size_cluster*inode.finit_cluster
buffer=self.fs_map[ptrb:ptrb+inode.fsize]
ptrbdest=self.sb.size_cluster*clustdest
self.fs_map[ptrbdest:ptrbdest+inode.fsize]=buffer
def close(self):
self.fs_map.close()
self.f.close()
def ls(self):
for i in self.inodes():
f=self.date_format(i.fmodif)
print("%s\t%d\t%d\t%s" %(i.fname,i.finit_cluster,i.fsize,f))
def rm(self,fe):
#Primero buscar si el archivo existe,
#si existe, perdemos la ref hacia él
i = self.search(fe)
if i is None :
print("rm: " + fe + " : No such file ")
else :
prtb | def date_format(self,date):
months={'01':'Jan','02':'Feb','03':'March','04':'Apr','05':'May',
'06':'Jun','07':'Jul','08':'Aug','09':'Sept','10':'Oct','11':'Nov','12':'Dec'}
a=date[0:4]
m=months.get(date[4:6])
d=date[6:8]
hh=date[8:10]
mm=date[10:12]
ss=date[12:14]
return m+'\t'+d+'\t'+a+'\t'+hh+':'+mm+':'+ss
def cpout(self,fe,dir):
#Primero buscar si el archivo existe,
#si existe, lo copiamos al directorio especificado
i = self.search(fe)
if i is None :
print("cpout: " + fe + " : No such file ")
else :
prtb = self.sb.size_cluster + self.sb.size_dentry*i.numdir
# VERIFICAR QUE EXISTA EL ARCHIVO
filecp = open(fe,"a+b")
cluster = self.sb.size_cluster*i.finit_cluster
# operacion : 2048*inicio_cluster_del_archivo_a_copiar
filecp.write(self.fs_map[cluster:cluster + i.fsize])
filecp.close()
def cpin(self,fe):
# Buscar si no hay un archivo con el nombre recibido
# Si no entonces
# buscar un lugar donde quepa el archivo
# sino hay lugar, desfragmentamos
# si despues de desfragmentar no hay lugar
# mandar error
# cargando todos los dentry que no tenga la cadena 'AQUI NO VA'
# mediante la funcion inodes()
#PRIMERO VALIDAR SI EL ARCHIVO EXISTA
if os.path.isfile(fe):
if len(fe)<15:
if self.search(fe)!=None:
print('Ya existe un archivo con el mismo nombre, renombrar')
else:
self.cp(fe)
else:
print("cpin: " + fe + ": file name too large")
else:
print("cpin: " + fe + ": file not found")
def defrag(self):
inodes=self.inodes()
if(len(inodes)!=0):
if inodes[0].finit_cluster != 5:
self.cpint(inodes[0],5)
self.over(inodes[0],5)
inodes[0].finit_cluster=5
for j in range(0,len(inodes)-1):
i_lastcluster = inodes[j].finit_cluster + math.ceil(inodes[j].fsize/self.sb.size_cluster)
self.cpint(inodes[j+1],i_lastcluster+1)
self.over(inodes[j+1],i_lastcluster+1)
inodes[j].finit_cluster=i_lastcluster+1
def over(self,inode,newcluster):
fe_cluster = str(newcluster)
cluster_zeros = inode.offset_fcluster- len(fe_cluster)
ptrb = self.sb.size_cluster + inode.numdir*self.sb.size_dentry+25
self.fs_map[ptrb:ptrb+inode.offset_fcluster]= | = self.sb.size_cluster + self.sb.size_dentry*i.numdir
self.fs_map[prtb:prtb + i.offset_fname] = bytes(self.dentry_notused).encode('utf-8')
| conditional_block |
fiUnam.py | :
"""
El superbloque para este sistema de archivos ocupa el primer cluster
del mismo, es decir, ocupa 2048
"""
f = open('fiunamfs.img','r+b')
fs_map = mmap.mmap(f.fileno(),0,access=mmap.ACCESS_READ)
# Información de superbloque
name = fs_map[0:8].decode('utf-8') # FiUnamFS
version = fs_map[10:13].decode('utf-8') # 0.4
tagv = fs_map[20:35].decode('utf-8') # Mi Sistema
size_cluster = int(fs_map[40:45].decode('utf-8')) # 2048
numdir_cluster = int(fs_map[47:49].decode('utf-8')) # 04
total_cluster = int(fs_map[52:60].decode('utf-8')) # 00000720
size_dentry = 64 # size dir entry
f.close()
fs_map.close()
class DIRENTRY :
"""
De hecho, estrictamente esta clase no es un inode ya que estamos
guardando el nombre del archivo en él y eso no pasa en los verdaderos
inodes y obviamente tampoco estamos guardando
permisos ni propietarios porque NO los tenemos
"""
offset_fname = 15
offset_fsize = 8
offset_fcluster = 5
offset_fcreated = 14
offset_fmodif = 14
fname = "" # 0-15
fsize = 0 # 16-24
finit_cluster = 0 # 25-30
fcreated = "" # 31-45
fmodif = "" # 46-60
numdir = -1 # numero entre 0-63
# por las especificaciones
def __init__(self, dir_entry):
self.fname = dir_entry[0:15].decode('utf-8').lstrip()
self.fsize = int(dir_entry[16:24].decode('utf-8'))
self.finit_cluster = int(dir_entry[25:30].decode('utf-8'))
self.fcreated = dir_entry[31:45].decode('utf-8')
self.fmodif = dir_entry[46:60].decode('utf-8')
class FIFS:
f = open('fiunamfs.img','a+b')
fs_map = mmap.mmap(f.fileno(),0,access=mmap.ACCESS_WRITE)
sb = SuperBlock()
dentry_notused ='Xx.xXx.xXx.xXx.'
# Función interna
def inodes(self):
# usamos del 1-4 clusters, es decir 2048*4 = 4096
# las entradas miden 64 por lo tanto 4096/64 = 128, entonces el rango
# del for 0-128
inodes = []
for j in range(0,128):
# El directorio se encuentra en los cluster de 1-4 y cada cluster
# mide 2048 por lo tanto debemo ir en 2048, el cluser 0 es el
# superblock
prtb = self.sb.size_cluster + j*self.sb.size_dentry
i = DIRENTRY(self.fs_map[prtb:prtb + self.sb.size_dentry])
if self.dentry_notused != i.fname:
i.numdir = j
inodes.append(i)
return inodes
def search(self,fe):
for j in range(0,128):
prtb = self.sb.size_cluster + j*self.sb.size_dentry
i = DIRENTRY(self.fs_map[prtb:prtb + self.sb.size_dentry])
if fe == i.fname:
i.numdir = j
return i
return None
def registerFile(self,fe,cluster):
for j in range(0,128):
prtb = self.sb.size_cluster + j*self.sb.size_dentry
i = DIRENTRY(self.fs_map[prtb:prtb + self.sb.size_dentry])
if self.dentry_notused == i.fname:
# tener cuidado con longitud de nombres
spaces = i.offset_fname - len(fe)
self.fs_map[prtb:prtb + i.offset_fname] = bytes(fe.rjust(len(fe)+spaces)).encode('utf-8')
fe_size = str(os.stat(fe).st_size)
size_zeros = i.offset_fsize - len(fe_size)
new_ptrb = prtb + i.offset_fname + 1
self.fs_map[new_ptrb :new_ptrb + i.offset_fsize] = bytes(fe_size.zfill(len(fe_size)+size_zeros)).encode('utf-8')
fe_cluster = str(cluster)
cluster_zeros = i.offset_fcluster - len(fe_cluster)
new_ptrb += i.offset_fsize + 1
self.fs_map[new_ptrb:new_ptrb + i.offset_fcluster] = bytes(fe_cluster.zfill(len(fe_cluster)+cluster_zeros)).encode('utf-8')
fe_date_create= time.strftime('%Y%m%d%H%M%S', time.gmtime(os.path.getctime(fe)))
new_ptrb += i.offset_fcluster + 1
self.fs_map[new_ptrb:new_ptrb + i.offset_fcreated] = bytes(fe_date_create).encode('utf-8')
fe_date_modif=time.strftime('%Y%m%d%H%M%S', time.gmtime(os.path.getmtime(fe)))
new_ptrb += i.offset_fcreated+ 1
self.fs_map[new_ptrb:new_ptrb + i.offset_fmodif] = bytes(fe_date_modif).encode('utf-8')
break
def cpint(self,inode,clustdest):
ptrb=self.sb.size_cluster*inode.finit_cluster
buffer=self.fs_map[ptrb:ptrb+inode.fsize]
ptrbdest=self.sb.size_cluster*clustdest
self.fs_map[ptrbdest:ptrbdest+inode.fsize]=buffer
def close(self):
self.fs_map.close()
self.f.close()
def ls(self):
for i in self.inodes():
f=self.date_format(i.fmodif)
print("%s\t%d\t%d\t%s" %(i.fname,i.finit_cluster,i.fsize,f))
def rm(self,fe):
#Primero buscar si el archivo existe,
#si existe, perdemos la ref hacia él
i = self.search(fe)
if i is None :
print("rm: " + fe + " : No such file ")
else :
prtb = self.sb.size_cluster + self.sb.size_dentry*i.numdir
self.fs_map[prtb:prtb + i.offset_fname] = bytes(self.dentry_notused).encode('utf-8')
def date_format(self,date):
months={'01':'Jan','02':'Feb','03':'March','04':'Apr','05':'May',
'06':'Jun','07':'Jul','08':'Aug','09':'Sept','10':'Oct','11':'Nov','12':'Dec'}
a=date[0:4]
m=months.get(date[4:6])
d=date[6:8]
hh=date[8:10]
mm=date[10:12]
ss=date[12:14]
return m+'\t'+d+'\t'+a+'\t'+hh+':'+mm+':'+ss
def cpout(self,fe,dir):
#Primero buscar si el archivo existe,
#si existe, lo copiamos al directorio especificado
i = self.search(fe)
if i is None :
print("cpout: " + fe + " : No such file ")
else :
prtb = self.sb.size_cluster + self.sb.size_dentry*i.numdir
# VERIFICAR QUE EXISTA EL ARCHIVO
filecp = open(fe,"a+b")
cluster = self.sb.size_cluster*i.finit_cluster
# operacion : 2048*inicio_cluster_del_archivo_a_copiar
filecp.write(self.fs_map[cluster:cluster + i.fsize])
filecp.close()
def cpin(self,fe):
# Buscar si no hay un archivo con el nombre recibido
# Si no entonces
# buscar un lugar donde quepa el archivo
# sino hay lugar, desfragmentamos
# si despues de desfragmentar no hay lugar
# mandar error
# cargando todos los dentry que no tenga la cadena 'AQUI NO VA'
# mediante la funcion inodes()
#PRIMERO VALIDAR SI EL ARCHIVO EXISTA
if os.path.isfile(fe):
if len(fe)<15:
if self | SuperBlock | identifier_name | |
fiUnam.py | es un inode ya que estamos
guardando el nombre del archivo en él y eso no pasa en los verdaderos
inodes y obviamente tampoco estamos guardando
permisos ni propietarios porque NO los tenemos
"""
offset_fname = 15
offset_fsize = 8
offset_fcluster = 5
offset_fcreated = 14
offset_fmodif = 14
fname = "" # 0-15
fsize = 0 # 16-24
finit_cluster = 0 # 25-30
fcreated = "" # 31-45
fmodif = "" # 46-60
numdir = -1 # numero entre 0-63
# por las especificaciones
def __init__(self, dir_entry):
self.fname = dir_entry[0:15].decode('utf-8').lstrip()
self.fsize = int(dir_entry[16:24].decode('utf-8'))
self.finit_cluster = int(dir_entry[25:30].decode('utf-8'))
self.fcreated = dir_entry[31:45].decode('utf-8')
self.fmodif = dir_entry[46:60].decode('utf-8')
class FIFS:
f = open('fiunamfs.img','a+b')
fs_map = mmap.mmap(f.fileno(),0,access=mmap.ACCESS_WRITE)
sb = SuperBlock()
dentry_notused ='Xx.xXx.xXx.xXx.'
# Función interna
def inodes(self):
# usamos del 1-4 clusters, es decir 2048*4 = 4096
# las entradas miden 64 por lo tanto 4096/64 = 128, entonces el rango
# del for 0-128
inodes = []
for j in range(0,128):
# El directorio se encuentra en los cluster de 1-4 y cada cluster
# mide 2048 por lo tanto debemo ir en 2048, el cluser 0 es el
# superblock
prtb = self.sb.size_cluster + j*self.sb.size_dentry
i = DIRENTRY(self.fs_map[prtb:prtb + self.sb.size_dentry])
if self.dentry_notused != i.fname:
i.numdir = j
inodes.append(i)
return inodes
def search(self,fe):
for j in range(0,128):
prtb = self.sb.size_cluster + j*self.sb.size_dentry
i = DIRENTRY(self.fs_map[prtb:prtb + self.sb.size_dentry])
if fe == i.fname:
i.numdir = j
return i
return None
def registerFile(self,fe,cluster):
for j in range(0,128):
prtb = self.sb.size_cluster + j*self.sb.size_dentry
i = DIRENTRY(self.fs_map[prtb:prtb + self.sb.size_dentry])
if self.dentry_notused == i.fname:
# tener cuidado con longitud de nombres
spaces = i.offset_fname - len(fe)
self.fs_map[prtb:prtb + i.offset_fname] = bytes(fe.rjust(len(fe)+spaces)).encode('utf-8')
fe_size = str(os.stat(fe).st_size)
size_zeros = i.offset_fsize - len(fe_size)
new_ptrb = prtb + i.offset_fname + 1
self.fs_map[new_ptrb :new_ptrb + i.offset_fsize] = bytes(fe_size.zfill(len(fe_size)+size_zeros)).encode('utf-8')
fe_cluster = str(cluster)
cluster_zeros = i.offset_fcluster - len(fe_cluster)
new_ptrb += i.offset_fsize + 1
self.fs_map[new_ptrb:new_ptrb + i.offset_fcluster] = bytes(fe_cluster.zfill(len(fe_cluster)+cluster_zeros)).encode('utf-8')
fe_date_create= time.strftime('%Y%m%d%H%M%S', time.gmtime(os.path.getctime(fe)))
new_ptrb += i.offset_fcluster + 1
self.fs_map[new_ptrb:new_ptrb + i.offset_fcreated] = bytes(fe_date_create).encode('utf-8')
fe_date_modif=time.strftime('%Y%m%d%H%M%S', time.gmtime(os.path.getmtime(fe)))
new_ptrb += i.offset_fcreated+ 1
self.fs_map[new_ptrb:new_ptrb + i.offset_fmodif] = bytes(fe_date_modif).encode('utf-8')
break
def cpint(self,inode,clustdest):
ptrb=self.sb.size_cluster*inode.finit_cluster
buffer=self.fs_map[ptrb:ptrb+inode.fsize]
ptrbdest=self.sb.size_cluster*clustdest
self.fs_map[ptrbdest:ptrbdest+inode.fsize]=buffer
def close(self):
self.fs_map.close()
self.f.close()
def ls(self):
for i in self.inodes():
f=self.date_format(i.fmodif)
print("%s\t%d\t%d\t%s" %(i.fname,i.finit_cluster,i.fsize,f))
def rm(self,fe):
#Primero buscar si el archivo existe,
#si existe, perdemos la ref hacia él
i = self.search(fe)
if i is None :
print("rm: " + fe + " : No such file ")
else :
prtb = self.sb.size_cluster + self.sb.size_dentry*i.numdir
self.fs_map[prtb:prtb + i.offset_fname] = bytes(self.dentry_notused).encode('utf-8')
def date_format(self,date):
months={'01':'Jan','02':'Feb','03':'March','04':'Apr','05':'May',
'06':'Jun','07':'Jul','08':'Aug','09':'Sept','10':'Oct','11':'Nov','12':'Dec'}
a=date[0:4]
m=months.get(date[4:6])
d=date[6:8]
hh=date[8:10]
mm=date[10:12]
ss=date[12:14]
return m+'\t'+d+'\t'+a+'\t'+hh+':'+mm+':'+ss
def cpout(self,fe,dir):
#Primero buscar si el archivo existe,
#si existe, lo copiamos al directorio especificado
i = self.search(fe)
if i is None :
print("cpout: " + fe + " : No such file ")
else :
prtb = self.sb.size_cluster + self.sb.size_dentry*i.numdir
# VERIFICAR QUE EXISTA EL ARCHIVO
filecp = open(fe,"a+b")
cluster = self.sb.size_cluster*i.finit_cluster
# operacion : 2048*inicio_cluster_del_archivo_a_copiar
filecp.write(self.fs_map[cluster:cluster + i.fsize])
filecp.close()
def cpin(self,fe):
# Buscar si no hay un archivo con el nombre recibido
# Si no entonces
# buscar un lugar donde quepa el archivo
# sino hay lugar, desfragmentamos
# si despues de desfragmentar no hay lugar
# mandar error
# cargando todos los dentry que no tenga la cadena 'AQUI NO VA'
# mediante la funcion inodes()
#PRIMERO VALIDAR SI EL ARCHIVO EXISTA
if o | def defrag(self):
inodes=self.inodes()
if(len(inodes)!=0):
if inodes[0].finit_cluster != 5:
self.cpint(inodes[0],5)
self.over(inodes[0],5)
inodes[0].finit_cluster=5
for j in range(0,len(inodes)-1):
i_lastcluster = inodes[j].finit_cluster + math.ceil(inodes[j].fsize/self.sb.size_cluster)
self.cpint(inodes[j+1],i_lastcluster+1)
self.over(inodes[j+1],i_lastcluster+1)
inodes[j].finit_cluster=i_lastcluster+1
def over(self,inode,newcluster):
fe_cluster = str(newcluster)
cluster_zeros = inode.offset_fcluster- len(fe_cluster)
ptrb = self.sb.size_cluster + inode.numdir*self.sb.size_dentry+25
self.fs_map[ptrb:ptrb+inode.offset_fcluster | s.path.isfile(fe):
if len(fe)<15:
if self.search(fe)!=None:
print('Ya existe un archivo con el mismo nombre, renombrar')
else:
self.cp(fe)
else:
print("cpin: " + fe + ": file name too large")
else:
print("cpin: " + fe + ": file not found")
| identifier_body |
fiUnam.py | numdir_cluster = int(fs_map[47:49].decode('utf-8')) # 04
total_cluster = int(fs_map[52:60].decode('utf-8')) # 00000720
size_dentry = 64 # size dir entry
f.close()
fs_map.close()
class DIRENTRY :
"""
De hecho, estrictamente esta clase no es un inode ya que estamos
guardando el nombre del archivo en él y eso no pasa en los verdaderos
inodes y obviamente tampoco estamos guardando
permisos ni propietarios porque NO los tenemos
"""
offset_fname = 15
offset_fsize = 8
offset_fcluster = 5
offset_fcreated = 14
offset_fmodif = 14
fname = "" # 0-15
fsize = 0 # 16-24
finit_cluster = 0 # 25-30
fcreated = "" # 31-45
fmodif = "" # 46-60
numdir = -1 # numero entre 0-63
# por las especificaciones
def __init__(self, dir_entry):
self.fname = dir_entry[0:15].decode('utf-8').lstrip()
self.fsize = int(dir_entry[16:24].decode('utf-8'))
self.finit_cluster = int(dir_entry[25:30].decode('utf-8'))
self.fcreated = dir_entry[31:45].decode('utf-8')
self.fmodif = dir_entry[46:60].decode('utf-8')
class FIFS:
f = open('fiunamfs.img','a+b')
fs_map = mmap.mmap(f.fileno(),0,access=mmap.ACCESS_WRITE)
sb = SuperBlock()
dentry_notused ='Xx.xXx.xXx.xXx.'
# Función interna
def inodes(self):
# usamos del 1-4 clusters, es decir 2048*4 = 4096
# las entradas miden 64 por lo tanto 4096/64 = 128, entonces el rango
# del for 0-128
inodes = []
for j in range(0,128):
# El directorio se encuentra en los cluster de 1-4 y cada cluster
# mide 2048 por lo tanto debemo ir en 2048, el cluser 0 es el
# superblock
prtb = self.sb.size_cluster + j*self.sb.size_dentry
i = DIRENTRY(self.fs_map[prtb:prtb + self.sb.size_dentry])
if self.dentry_notused != i.fname:
i.numdir = j
inodes.append(i)
return inodes
def search(self,fe):
for j in range(0,128):
prtb = self.sb.size_cluster + j*self.sb.size_dentry
i = DIRENTRY(self.fs_map[prtb:prtb + self.sb.size_dentry])
if fe == i.fname:
i.numdir = j
return i
return None
def registerFile(self,fe,cluster):
for j in range(0,128):
prtb = self.sb.size_cluster + j*self.sb.size_dentry
i = DIRENTRY(self.fs_map[prtb:prtb + self.sb.size_dentry])
if self.dentry_notused == i.fname:
# tener cuidado con longitud de nombres
spaces = i.offset_fname - len(fe)
self.fs_map[prtb:prtb + i.offset_fname] = bytes(fe.rjust(len(fe)+spaces)).encode('utf-8')
fe_size = str(os.stat(fe).st_size)
size_zeros = i.offset_fsize - len(fe_size)
new_ptrb = prtb + i.offset_fname + 1
self.fs_map[new_ptrb :new_ptrb + i.offset_fsize] = bytes(fe_size.zfill(len(fe_size)+size_zeros)).encode('utf-8')
fe_cluster = str(cluster)
cluster_zeros = i.offset_fcluster - len(fe_cluster)
new_ptrb += i.offset_fsize + 1
self.fs_map[new_ptrb:new_ptrb + i.offset_fcluster] = bytes(fe_cluster.zfill(len(fe_cluster)+cluster_zeros)).encode('utf-8')
fe_date_create= time.strftime('%Y%m%d%H%M%S', time.gmtime(os.path.getctime(fe)))
new_ptrb += i.offset_fcluster + 1
self.fs_map[new_ptrb:new_ptrb + i.offset_fcreated] = bytes(fe_date_create).encode('utf-8')
fe_date_modif=time.strftime('%Y%m%d%H%M%S', time.gmtime(os.path.getmtime(fe)))
new_ptrb += i.offset_fcreated+ 1
self.fs_map[new_ptrb:new_ptrb + i.offset_fmodif] = bytes(fe_date_modif).encode('utf-8')
break
def cpint(self,inode,clustdest):
ptrb=self.sb.size_cluster*inode.finit_cluster
buffer=self.fs_map[ptrb:ptrb+inode.fsize]
ptrbdest=self.sb.size_cluster*clustdest
self.fs_map[ptrbdest:ptrbdest+inode.fsize]=buffer
def close(self):
self.fs_map.close()
self.f.close()
def ls(self):
for i in self.inodes():
f=self.date_format(i.fmodif)
print("%s\t%d\t%d\t%s" %(i.fname,i.finit_cluster,i.fsize,f))
def rm(self,fe):
#Primero buscar si el archivo existe,
#si existe, perdemos la ref hacia él
i = self.search(fe)
if i is None :
print("rm: " + fe + " : No such file ")
else :
prtb = self.sb.size_cluster + self.sb.size_dentry*i.numdir
self.fs_map[prtb:prtb + i.offset_fname] = bytes(self.dentry_notused).encode('utf-8')
def date_format(self,date):
months={'01':'Jan','02':'Feb','03':'March','04':'Apr','05':'May',
'06':'Jun','07':'Jul','08':'Aug','09':'Sept','10':'Oct','11':'Nov','12':'Dec'}
a=date[0:4]
m=months.get(date[4:6])
d=date[6:8]
hh=date[8:10]
mm=date[10:12]
ss=date[12:14]
return m+'\t'+d+'\t'+a+'\t'+hh+':'+mm+':'+ss
def cpout(self,fe,dir):
#Primero buscar si el archivo existe,
#si existe, lo copiamos al directorio especificado
i = self.search(fe)
if i is None :
print("cpout: " + fe + " : No such file ")
else :
prtb = self.sb.size_cluster + self.sb.size_dentry*i.numdir
# VERIFICAR QUE EXISTA EL ARCHIVO
filecp = open(fe,"a+b")
cluster = self.sb.size_cluster*i.finit_cluster
# operacion : 2048*inicio_cluster_del_archivo_a_copiar
filecp.write(self.fs_map[cluster:cluster + i.fsize])
filecp.close()
def cpin(self,fe):
# Buscar si no hay un archivo con el nombre recibido
# Si no entonces
# buscar un lugar donde quepa el archivo
# sino hay lugar, desfragmentamos
# si despues de desfragmentar no hay lugar
# mandar error
# cargando todos los dentry que no tenga la cadena 'AQUI NO VA'
# mediante la funcion inodes()
#PRIMERO VALIDAR SI EL ARCHIVO EXISTA
if os.path.isfile(fe):
if len(fe)<15:
if self.search(fe)!=None:
print('Ya existe un archivo con el mismo nombre, renombrar')
else:
self.cp(fe)
else:
print("cpin: " + fe + ": file name too large")
else:
print("cpin: " + fe + ": file not found")
def defrag(self):
inodes=self.inodes()
if(len(inodes)!=0):
if inodes[0].finit_cluster != 5:
self.cpint(inodes[0],5)
self.over(inodes[0],5)
inodes[0].finit_cluster=5
for j in range(0,len(inodes)-1):
i_lastcluster = inodes[j].finit_cluster + math.ceil(inodes[j].fsize/self.sb.size_cluster)
self.cpint(inodes[j+1], | random_line_split | ||
boilerplate.py | ):
self.__add(name, value, type='val')
def _add_train_performance(self, name, value):
self.__add(name, value, type='train')
def add_performance(self, metric_name, train_value, val_value):
self._add_train_performance(metric_name, train_value )
self._add_val_performance(metric_name, val_value)
self.plot(metric_name)
def plot(self, metric_name):
|
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate_by_schedule(config, optimizer, epoch, decrease_rate=0.1):
"""Sets the learning rate to the initial LR decayed by 1/decrease_rate every 10 epochs"""
if not isinstance(optimizer, torch.optim.SGD):
return
#lr = config.lr * (0.1 ** (epoch // 10))
if epoch and epoch % 10 == 0:
for i, param_group in enumerate(optimizer.param_groups):
param_group['lr'] *= decrease_rate
logger.info('Setting learning layer=i, rate=%.6f', i, param_group['lr'])
class PlateauScheduler(object):
"""Sets the lr to the initial LR decayed by 1/decrease_rate, when not improving for max_stops epochs"""
def __init__(self, optimizer, patience, early_stop_n, decrease_rate=0.1, eps=1e-5,
warm_up_epochs=None, best_score=None):
self.optimizer = optimizer
if not isinstance(optimizer, (torch.optim.SGD, YFOptimizer)):
raise TypeError
self.patience = patience
self.early_stop_n = early_stop_n
self.decrease_rate = decrease_rate
self.eps = eps
self.warm_up_epochs = warm_up_epochs
self.__lr_changed = 0
self.__early_stop_counter = 0
self.__best_score = best_score
self.__descrease_times = 0
self.__warm_up = self.__has_warm_up(optimizer)
def __has_warm_up(self, optimizer):
for param_group in self.optimizer.param_groups:
if param_group['lr'] != param_group['after_warmup_lr']:
logger.info('Optimizer has warm-up stage')
return True
def step(self, epoch, score):
adjusted, to_break = False, False
prev_best_score = self.__best_score or -1
is_best = self.__best_score is None or score < self.__best_score - self.eps
self.__best_score = self.__best_score is not None and min(score, self.__best_score) or score
if is_best:
logger.info('Current model is best by val score %.5f < %.5f' % (self.__best_score, prev_best_score))
self.__early_stop_counter = 0
else:
self.__early_stop_counter += 1
if self.__early_stop_counter >= self.early_stop_n:
logger.info('Early stopping, regress for %d iterations', self.__early_stop_counter)
to_break = True
logger.info('early_stop_counter: %d', self.__early_stop_counter)
if (self.warm_up_epochs and self.__descrease_times == 0 and self.__warm_up and epoch >= self.warm_up_epochs - 1 ) or \
(self.__lr_changed <= epoch - self.patience and \
(self.__early_stop_counter is not None and self.patience and self.__early_stop_counter >= self.patience)):
self.__lr_changed = epoch
for param_group in self.optimizer.param_groups:
if self.__descrease_times == 0 and self.__warm_up:
param_group['lr'] = param_group['after_warmup_lr']
else:
param_group['lr'] = param_group['lr'] * self.decrease_rate
logger.info('Setting for group learning rate=%.8f, epoch=%d', param_group['lr'], self.__lr_changed)
adjusted = True
self.__descrease_times += 1
return adjusted, to_break, is_best
def init_optimizer(model, config, exact_layers=None):
"""param 'exact_layers' specifies which parameters of the model to train, None - all,
else - list of layers with a multiplier (optional) for LR schedule"""
opt_type = config.optimizer
if exact_layers:
logger.info('Learning exact layers, number=%d', len(exact_layers))
parameters = []
for i, layer in enumerate(exact_layers):
if isinstance(layer, tuple) and len(layer) == 2:
layer, multiplier = layer
init_multiplier = 1
elif isinstance(layer, tuple) and len(layer) == 3:
layer, init_multiplier, multiplier = layer
else:
multiplier = 1
init_multiplier = 1
lr = config.lr * multiplier
init_lr = config.lr * multiplier * init_multiplier
logger.info('Layer=%d, lr=%.5f', i, init_lr)
parameters.append({'params': layer.parameters(), 'lr': init_lr, 'after_warmup_lr': lr})
else:
logger.info('Optimizing all parameters, lr=%.5f', config.lr)
parameters = model.parameters()
if opt_type == 'sgd':
optimizer = torch.optim.SGD(parameters, config.lr, momentum=config.momentum, weight_decay=config.weight_decay)
elif opt_type == 'adam':
optimizer = torch.optim.Adam(parameters, lr=config.lr, weight_decay=config.weight_decay)
elif opt_type == 'yf':
optimizer = YFOptimizer(parameters, config.lr, mu=config.momentum, weight_decay=config.weight_decay,
clip_thresh=0.1)
else:
raise TypeError, 'Unknown optimizer type=%s' % (opt_type, )
return optimizer
def save_checkpoint(state, epoch, is_best, filename, best_filename):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, best_filename)
shutil.copyfile(filename, best_filename + '-%d' % epoch)
def load_checkpoint(filename):
checkpoint = torch.load(filename)
return checkpoint
def train(train_loader, model, criterion, optimizer, epoch, is_multi_fc=False):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
predictions = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
# compute output
if is_multi_fc==False:
# this is original loss function
output = model(input_var)
loss = criterion(output, target_var)
else:
# this is for inception_v3 with 2 output channels
# https://github.com/pytorch/vision/issues/302
output, output_aux = model(input_var)
loss = criterion(output, target_var)
loss+= criterion(output_aux, target_var)
# measure accuracy and record loss
losses.update(loss.data[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if (i and i % 50 == 0) or i == len(train_loader) - 1:
logger.info('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Accuracy {acc.val:.4f} ({acc.avg:.4f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, acc=predictions))
return losses.avg
def compute_f2(output, target):
true_and_pred = target * output
ttp_sum = torch.sum(true_and_pred, 1)
tpred_sum = torch.sum(output, 1)
ttrue_sum = torch | current_win = self.__win_dict.get(metric_name, None)
train_values = self.__metrics['train'][metric_name]
val_values = self.__metrics['val'][metric_name]
epochs = max(len(train_values), len(val_values))
values_for_plot = np.column_stack((np.array(train_values), np.array(val_values)))
opts = copy.deepcopy(self.__opts)
opts.update(dict(title='%s\ntrain/val %s' % (self.__prefix, metric_name)))
win = self.__vis.line(Y=values_for_plot, X=np.arange(epochs), opts=opts, win=current_win)
if current_win is None:
self.__win_dict[metric_name] = win | identifier_body |
boilerplate.py | self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate_by_schedule(config, optimizer, epoch, decrease_rate=0.1):
"""Sets the learning rate to the initial LR decayed by 1/decrease_rate every 10 epochs"""
if not isinstance(optimizer, torch.optim.SGD):
return
#lr = config.lr * (0.1 ** (epoch // 10))
if epoch and epoch % 10 == 0:
for i, param_group in enumerate(optimizer.param_groups):
param_group['lr'] *= decrease_rate
logger.info('Setting learning layer=i, rate=%.6f', i, param_group['lr'])
class PlateauScheduler(object):
"""Sets the lr to the initial LR decayed by 1/decrease_rate, when not improving for max_stops epochs"""
def __init__(self, optimizer, patience, early_stop_n, decrease_rate=0.1, eps=1e-5,
warm_up_epochs=None, best_score=None):
self.optimizer = optimizer
if not isinstance(optimizer, (torch.optim.SGD, YFOptimizer)):
raise TypeError
self.patience = patience
self.early_stop_n = early_stop_n
self.decrease_rate = decrease_rate
self.eps = eps
self.warm_up_epochs = warm_up_epochs
self.__lr_changed = 0
self.__early_stop_counter = 0
self.__best_score = best_score
self.__descrease_times = 0
self.__warm_up = self.__has_warm_up(optimizer)
def __has_warm_up(self, optimizer):
for param_group in self.optimizer.param_groups:
if param_group['lr'] != param_group['after_warmup_lr']:
logger.info('Optimizer has warm-up stage')
return True
def step(self, epoch, score):
adjusted, to_break = False, False
prev_best_score = self.__best_score or -1
is_best = self.__best_score is None or score < self.__best_score - self.eps
self.__best_score = self.__best_score is not None and min(score, self.__best_score) or score
if is_best:
logger.info('Current model is best by val score %.5f < %.5f' % (self.__best_score, prev_best_score))
self.__early_stop_counter = 0
else:
self.__early_stop_counter += 1
if self.__early_stop_counter >= self.early_stop_n:
logger.info('Early stopping, regress for %d iterations', self.__early_stop_counter)
to_break = True
logger.info('early_stop_counter: %d', self.__early_stop_counter)
if (self.warm_up_epochs and self.__descrease_times == 0 and self.__warm_up and epoch >= self.warm_up_epochs - 1 ) or \
(self.__lr_changed <= epoch - self.patience and \
(self.__early_stop_counter is not None and self.patience and self.__early_stop_counter >= self.patience)):
self.__lr_changed = epoch
for param_group in self.optimizer.param_groups:
if self.__descrease_times == 0 and self.__warm_up:
param_group['lr'] = param_group['after_warmup_lr']
else:
param_group['lr'] = param_group['lr'] * self.decrease_rate
logger.info('Setting for group learning rate=%.8f, epoch=%d', param_group['lr'], self.__lr_changed)
adjusted = True
self.__descrease_times += 1
return adjusted, to_break, is_best
def init_optimizer(model, config, exact_layers=None):
"""param 'exact_layers' specifies which parameters of the model to train, None - all,
else - list of layers with a multiplier (optional) for LR schedule"""
opt_type = config.optimizer
if exact_layers:
logger.info('Learning exact layers, number=%d', len(exact_layers))
parameters = []
for i, layer in enumerate(exact_layers):
if isinstance(layer, tuple) and len(layer) == 2:
layer, multiplier = layer
init_multiplier = 1
elif isinstance(layer, tuple) and len(layer) == 3:
layer, init_multiplier, multiplier = layer
else:
multiplier = 1
init_multiplier = 1
lr = config.lr * multiplier
init_lr = config.lr * multiplier * init_multiplier
logger.info('Layer=%d, lr=%.5f', i, init_lr)
parameters.append({'params': layer.parameters(), 'lr': init_lr, 'after_warmup_lr': lr})
else:
logger.info('Optimizing all parameters, lr=%.5f', config.lr)
parameters = model.parameters()
if opt_type == 'sgd':
optimizer = torch.optim.SGD(parameters, config.lr, momentum=config.momentum, weight_decay=config.weight_decay)
elif opt_type == 'adam':
optimizer = torch.optim.Adam(parameters, lr=config.lr, weight_decay=config.weight_decay)
elif opt_type == 'yf':
optimizer = YFOptimizer(parameters, config.lr, mu=config.momentum, weight_decay=config.weight_decay,
clip_thresh=0.1)
else:
raise TypeError, 'Unknown optimizer type=%s' % (opt_type, )
return optimizer
def save_checkpoint(state, epoch, is_best, filename, best_filename):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, best_filename)
shutil.copyfile(filename, best_filename + '-%d' % epoch)
def load_checkpoint(filename):
checkpoint = torch.load(filename)
return checkpoint
def train(train_loader, model, criterion, optimizer, epoch, is_multi_fc=False):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
predictions = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
# compute output
if is_multi_fc==False:
# this is original loss function
output = model(input_var)
loss = criterion(output, target_var)
else:
# this is for inception_v3 with 2 output channels
# https://github.com/pytorch/vision/issues/302
output, output_aux = model(input_var)
loss = criterion(output, target_var)
loss+= criterion(output_aux, target_var)
# measure accuracy and record loss
losses.update(loss.data[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if (i and i % 50 == 0) or i == len(train_loader) - 1:
logger.info('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Accuracy {acc.val:.4f} ({acc.avg:.4f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, acc=predictions))
return losses.avg
def compute_f2(output, target):
true_and_pred = target * output
ttp_sum = torch.sum(true_and_pred, 1)
tpred_sum = torch.sum(output, 1)
ttrue_sum = torch.sum(target, 1)
tprecision = ttp_sum / tpred_sum
trecall = ttp_sum / ttrue_sum
f2 = ((1 + 4) * tprecision * trecall) / (4 * tprecision + trecall)
return f2
def validate(val_loader, model, criterion, activation=torch.sigmoid):
logger.info('Validating model')
batch_time = AverageMeter()
losses = AverageMeter()
f2s = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# compute f2
f2 = compute_f2(activation(output), target_var).mean()
f2s.update(f2.data[0], input.size(0))
# measure accuracy and record loss
losses.update(loss.data[0], input.size(0))
# measure elapsed time | batch_time.update(time.time() - end) | random_line_split | |
boilerplate.py | ):
self.__add(name, value, type='val')
def _add_train_performance(self, name, value):
self.__add(name, value, type='train')
def add_performance(self, metric_name, train_value, val_value):
self._add_train_performance(metric_name, train_value )
self._add_val_performance(metric_name, val_value)
self.plot(metric_name)
def plot(self, metric_name):
current_win = self.__win_dict.get(metric_name, None)
train_values = self.__metrics['train'][metric_name]
val_values = self.__metrics['val'][metric_name]
epochs = max(len(train_values), len(val_values))
values_for_plot = np.column_stack((np.array(train_values), np.array(val_values)))
opts = copy.deepcopy(self.__opts)
opts.update(dict(title='%s\ntrain/val %s' % (self.__prefix, metric_name)))
win = self.__vis.line(Y=values_for_plot, X=np.arange(epochs), opts=opts, win=current_win)
if current_win is None:
self.__win_dict[metric_name] = win
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate_by_schedule(config, optimizer, epoch, decrease_rate=0.1):
"""Sets the learning rate to the initial LR decayed by 1/decrease_rate every 10 epochs"""
if not isinstance(optimizer, torch.optim.SGD):
return
#lr = config.lr * (0.1 ** (epoch // 10))
if epoch and epoch % 10 == 0:
for i, param_group in enumerate(optimizer.param_groups):
param_group['lr'] *= decrease_rate
logger.info('Setting learning layer=i, rate=%.6f', i, param_group['lr'])
class PlateauScheduler(object):
"""Sets the lr to the initial LR decayed by 1/decrease_rate, when not improving for max_stops epochs"""
def __init__(self, optimizer, patience, early_stop_n, decrease_rate=0.1, eps=1e-5,
warm_up_epochs=None, best_score=None):
self.optimizer = optimizer
if not isinstance(optimizer, (torch.optim.SGD, YFOptimizer)):
raise TypeError
self.patience = patience
self.early_stop_n = early_stop_n
self.decrease_rate = decrease_rate
self.eps = eps
self.warm_up_epochs = warm_up_epochs
self.__lr_changed = 0
self.__early_stop_counter = 0
self.__best_score = best_score
self.__descrease_times = 0
self.__warm_up = self.__has_warm_up(optimizer)
def __has_warm_up(self, optimizer):
for param_group in self.optimizer.param_groups:
if param_group['lr'] != param_group['after_warmup_lr']:
logger.info('Optimizer has warm-up stage')
return True
def step(self, epoch, score):
adjusted, to_break = False, False
prev_best_score = self.__best_score or -1
is_best = self.__best_score is None or score < self.__best_score - self.eps
self.__best_score = self.__best_score is not None and min(score, self.__best_score) or score
if is_best:
logger.info('Current model is best by val score %.5f < %.5f' % (self.__best_score, prev_best_score))
self.__early_stop_counter = 0
else:
self.__early_stop_counter += 1
if self.__early_stop_counter >= self.early_stop_n:
|
logger.info('early_stop_counter: %d', self.__early_stop_counter)
if (self.warm_up_epochs and self.__descrease_times == 0 and self.__warm_up and epoch >= self.warm_up_epochs - 1 ) or \
(self.__lr_changed <= epoch - self.patience and \
(self.__early_stop_counter is not None and self.patience and self.__early_stop_counter >= self.patience)):
self.__lr_changed = epoch
for param_group in self.optimizer.param_groups:
if self.__descrease_times == 0 and self.__warm_up:
param_group['lr'] = param_group['after_warmup_lr']
else:
param_group['lr'] = param_group['lr'] * self.decrease_rate
logger.info('Setting for group learning rate=%.8f, epoch=%d', param_group['lr'], self.__lr_changed)
adjusted = True
self.__descrease_times += 1
return adjusted, to_break, is_best
def init_optimizer(model, config, exact_layers=None):
"""param 'exact_layers' specifies which parameters of the model to train, None - all,
else - list of layers with a multiplier (optional) for LR schedule"""
opt_type = config.optimizer
if exact_layers:
logger.info('Learning exact layers, number=%d', len(exact_layers))
parameters = []
for i, layer in enumerate(exact_layers):
if isinstance(layer, tuple) and len(layer) == 2:
layer, multiplier = layer
init_multiplier = 1
elif isinstance(layer, tuple) and len(layer) == 3:
layer, init_multiplier, multiplier = layer
else:
multiplier = 1
init_multiplier = 1
lr = config.lr * multiplier
init_lr = config.lr * multiplier * init_multiplier
logger.info('Layer=%d, lr=%.5f', i, init_lr)
parameters.append({'params': layer.parameters(), 'lr': init_lr, 'after_warmup_lr': lr})
else:
logger.info('Optimizing all parameters, lr=%.5f', config.lr)
parameters = model.parameters()
if opt_type == 'sgd':
optimizer = torch.optim.SGD(parameters, config.lr, momentum=config.momentum, weight_decay=config.weight_decay)
elif opt_type == 'adam':
optimizer = torch.optim.Adam(parameters, lr=config.lr, weight_decay=config.weight_decay)
elif opt_type == 'yf':
optimizer = YFOptimizer(parameters, config.lr, mu=config.momentum, weight_decay=config.weight_decay,
clip_thresh=0.1)
else:
raise TypeError, 'Unknown optimizer type=%s' % (opt_type, )
return optimizer
def save_checkpoint(state, epoch, is_best, filename, best_filename):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, best_filename)
shutil.copyfile(filename, best_filename + '-%d' % epoch)
def load_checkpoint(filename):
checkpoint = torch.load(filename)
return checkpoint
def train(train_loader, model, criterion, optimizer, epoch, is_multi_fc=False):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
predictions = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
# compute output
if is_multi_fc==False:
# this is original loss function
output = model(input_var)
loss = criterion(output, target_var)
else:
# this is for inception_v3 with 2 output channels
# https://github.com/pytorch/vision/issues/302
output, output_aux = model(input_var)
loss = criterion(output, target_var)
loss+= criterion(output_aux, target_var)
# measure accuracy and record loss
losses.update(loss.data[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if (i and i % 50 == 0) or i == len(train_loader) - 1:
logger.info('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Accuracy {acc.val:.4f} ({acc.avg:.4f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, acc=predictions))
return losses.avg
def compute_f2(output, target):
true_and_pred = target * output
ttp_sum = torch.sum(true_and_pred, 1)
tpred_sum = torch.sum(output, 1)
ttrue_sum = torch | logger.info('Early stopping, regress for %d iterations', self.__early_stop_counter)
to_break = True | conditional_block |
boilerplate.py | ):
self.__add(name, value, type='val')
def _add_train_performance(self, name, value):
self.__add(name, value, type='train')
def add_performance(self, metric_name, train_value, val_value):
self._add_train_performance(metric_name, train_value )
self._add_val_performance(metric_name, val_value)
self.plot(metric_name)
def plot(self, metric_name):
current_win = self.__win_dict.get(metric_name, None)
train_values = self.__metrics['train'][metric_name]
val_values = self.__metrics['val'][metric_name]
epochs = max(len(train_values), len(val_values))
values_for_plot = np.column_stack((np.array(train_values), np.array(val_values)))
opts = copy.deepcopy(self.__opts)
opts.update(dict(title='%s\ntrain/val %s' % (self.__prefix, metric_name)))
win = self.__vis.line(Y=values_for_plot, X=np.arange(epochs), opts=opts, win=current_win)
if current_win is None:
self.__win_dict[metric_name] = win
class AverageMeter(object):
"""Computes and stores the average and current value"""
def | (self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate_by_schedule(config, optimizer, epoch, decrease_rate=0.1):
"""Sets the learning rate to the initial LR decayed by 1/decrease_rate every 10 epochs"""
if not isinstance(optimizer, torch.optim.SGD):
return
#lr = config.lr * (0.1 ** (epoch // 10))
if epoch and epoch % 10 == 0:
for i, param_group in enumerate(optimizer.param_groups):
param_group['lr'] *= decrease_rate
logger.info('Setting learning layer=i, rate=%.6f', i, param_group['lr'])
class PlateauScheduler(object):
"""Sets the lr to the initial LR decayed by 1/decrease_rate, when not improving for max_stops epochs"""
def __init__(self, optimizer, patience, early_stop_n, decrease_rate=0.1, eps=1e-5,
warm_up_epochs=None, best_score=None):
self.optimizer = optimizer
if not isinstance(optimizer, (torch.optim.SGD, YFOptimizer)):
raise TypeError
self.patience = patience
self.early_stop_n = early_stop_n
self.decrease_rate = decrease_rate
self.eps = eps
self.warm_up_epochs = warm_up_epochs
self.__lr_changed = 0
self.__early_stop_counter = 0
self.__best_score = best_score
self.__descrease_times = 0
self.__warm_up = self.__has_warm_up(optimizer)
def __has_warm_up(self, optimizer):
for param_group in self.optimizer.param_groups:
if param_group['lr'] != param_group['after_warmup_lr']:
logger.info('Optimizer has warm-up stage')
return True
def step(self, epoch, score):
adjusted, to_break = False, False
prev_best_score = self.__best_score or -1
is_best = self.__best_score is None or score < self.__best_score - self.eps
self.__best_score = self.__best_score is not None and min(score, self.__best_score) or score
if is_best:
logger.info('Current model is best by val score %.5f < %.5f' % (self.__best_score, prev_best_score))
self.__early_stop_counter = 0
else:
self.__early_stop_counter += 1
if self.__early_stop_counter >= self.early_stop_n:
logger.info('Early stopping, regress for %d iterations', self.__early_stop_counter)
to_break = True
logger.info('early_stop_counter: %d', self.__early_stop_counter)
if (self.warm_up_epochs and self.__descrease_times == 0 and self.__warm_up and epoch >= self.warm_up_epochs - 1 ) or \
(self.__lr_changed <= epoch - self.patience and \
(self.__early_stop_counter is not None and self.patience and self.__early_stop_counter >= self.patience)):
self.__lr_changed = epoch
for param_group in self.optimizer.param_groups:
if self.__descrease_times == 0 and self.__warm_up:
param_group['lr'] = param_group['after_warmup_lr']
else:
param_group['lr'] = param_group['lr'] * self.decrease_rate
logger.info('Setting for group learning rate=%.8f, epoch=%d', param_group['lr'], self.__lr_changed)
adjusted = True
self.__descrease_times += 1
return adjusted, to_break, is_best
def init_optimizer(model, config, exact_layers=None):
"""param 'exact_layers' specifies which parameters of the model to train, None - all,
else - list of layers with a multiplier (optional) for LR schedule"""
opt_type = config.optimizer
if exact_layers:
logger.info('Learning exact layers, number=%d', len(exact_layers))
parameters = []
for i, layer in enumerate(exact_layers):
if isinstance(layer, tuple) and len(layer) == 2:
layer, multiplier = layer
init_multiplier = 1
elif isinstance(layer, tuple) and len(layer) == 3:
layer, init_multiplier, multiplier = layer
else:
multiplier = 1
init_multiplier = 1
lr = config.lr * multiplier
init_lr = config.lr * multiplier * init_multiplier
logger.info('Layer=%d, lr=%.5f', i, init_lr)
parameters.append({'params': layer.parameters(), 'lr': init_lr, 'after_warmup_lr': lr})
else:
logger.info('Optimizing all parameters, lr=%.5f', config.lr)
parameters = model.parameters()
if opt_type == 'sgd':
optimizer = torch.optim.SGD(parameters, config.lr, momentum=config.momentum, weight_decay=config.weight_decay)
elif opt_type == 'adam':
optimizer = torch.optim.Adam(parameters, lr=config.lr, weight_decay=config.weight_decay)
elif opt_type == 'yf':
optimizer = YFOptimizer(parameters, config.lr, mu=config.momentum, weight_decay=config.weight_decay,
clip_thresh=0.1)
else:
raise TypeError, 'Unknown optimizer type=%s' % (opt_type, )
return optimizer
def save_checkpoint(state, epoch, is_best, filename, best_filename):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, best_filename)
shutil.copyfile(filename, best_filename + '-%d' % epoch)
def load_checkpoint(filename):
checkpoint = torch.load(filename)
return checkpoint
def train(train_loader, model, criterion, optimizer, epoch, is_multi_fc=False):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
predictions = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
# compute output
if is_multi_fc==False:
# this is original loss function
output = model(input_var)
loss = criterion(output, target_var)
else:
# this is for inception_v3 with 2 output channels
# https://github.com/pytorch/vision/issues/302
output, output_aux = model(input_var)
loss = criterion(output, target_var)
loss+= criterion(output_aux, target_var)
# measure accuracy and record loss
losses.update(loss.data[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if (i and i % 50 == 0) or i == len(train_loader) - 1:
logger.info('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Accuracy {acc.val:.4f} ({acc.avg:.4f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, acc=predictions))
return losses.avg
def compute_f2(output, target):
true_and_pred = target * output
ttp_sum = torch.sum(true_and_pred, 1)
tpred_sum = torch.sum(output, 1)
ttrue_sum = | __init__ | identifier_name |
hangman.py | = loadWords()
def isWordGuessed(secretWord, lettersGuessed):
'''
secretWord: string, the word the user is guessing
lettersGuessed: list, what letters have been guessed so far
returns: boolean, True if all the letters of secretWord are in lettersGuessed;
False otherwise
'''
count = 0 # Count method takes a single arguent, element whose count is to be found. Starting at 0 letters.
for i, c in enumerate(secretWord): #enumerate is a built-in function of Python. It allos us to loop over something and have an automatic counter.
# Starting a loop to check if the guessed letter is inside the secretWord. "i" is the index, and "c" is the vlaue.
if c in lettersGuessed:#Another loop to check if the letter is correct. "c" (value) which is a letter in "lettersGuessed"
count += 1 #You add a number if there is a letter in the lettersguessed that is in the secretWord. This will force the loop to start over until the amount of letters in the secretWord.
if count == len(secretWord): #Once the loop is done. If the amount of counts equal the amount of letters in the secretWord
return True #Then you return True because the word was guessed correctly.
else: #if that does not happen
return False #Then you return false because the the person is missing letters to make the correct guess.
# When you've completed your function isWordGuessed, uncomment these three lines
# and run this file to test!
secretWord = 'apple'
lettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']
print(isWordGuessed(secretWord, lettersGuessed))
#You start off with count = 0, a is the first value, a is in lettersGuessed
#so the count becomes 1, then the next letter is p, the count becomes 2, and again p, the count becomes
#3, and then l, since there is no "l" in lettersGuessed, the count remains 3, then the next letter, "e"
#the count becomes 4. One the secretWord letters are done, you move onto if the count equals the secretWord count of letters
#then you get True. However, the count in this case is 4 and the secretWord has 5 letters, so the output is false.
# Expected output:
# False
def getGuessedWord(secretWord, lettersGuessed): | lettersGuessed: list, what letters have been guessed so far
returns: string, comprised of letters and underscores that represents
what letters in secretWord have been guessed so far.
'''
count = 0 #Count method takes a single agument, element whose count is to be found. Starting a 0 letters, and each time it is a correct letter, the count will increase.
blank = ['_ '] * len(secretWord) #An underscore will be multiplied by how my letters in the secretWord.
for i, c in enumerate(secretWord):#Creaing a loop, "i" is the count and "c" is the value or letter in the secretWord.
if c in lettersGuessed:#If letter is in lettersGuesssed
count += 1 #The count will increase by 1 since the letter matches a letter in secretWorld.
blank.insert(count-1,c)#You are inserting the letter on the line of words. The the letter will be placed in the index number.
blank.pop(count) #pop() removees and returns the last item in the lsit. This will remove the underscore.
if count == len(secretWord):#Once all of the letters match the number of letter in the secretworkd
return ''.join(str(t) for t in blank) #then return join() merges the string representations of elements in sequence "e" into a string, with seperator string.
else: #if the letter is not in secretWrod
count += 1 #You add the guess as a count.
blank.insert(count-1,'_') #A blank will be placed in hold of the person not guessing the letter. The count-1, is for the index in the word. If the count is 4, then the placement of the letter is 3: 0,1,2,3.
blank.pop(count)#The pop removes the underscore that you need to get rid of because it created an extra index.
if count == len(secretWord):#Once the guessed letters match up to the amount of letter for secretWord you are done with the loop.
return ''.join(str(t) for t in blank) #this brings it all together.
# # When you've completed your function getGuessedWord, uncomment these three lines
# # and run this file to test!
secretWord = 'apple'
lettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']
print(getGuessedWord(secretWord, lettersGuessed))
#So, you start off with "a", it jumps to the else statement. It counts 1, and inserts a "_" for the first letter of the
#list of "_" because it is index 0. Then "p", count becomes 2 and will go with the first part of the function. It will add a
#"p" in the 1st index, and pop out a blank because a letter was inserted. You do this for every letter.
# # Expected output:
# # '_ pp_ e'
def getAvailableLetters(lettersGuessed):
'''
lettersGuessed: list, what letters have been guessed so far
returns: string, comprised of letters that represents what letters have not
yet been guessed.
'''
alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']# Every letter in the alphebet.
alphabet2 = alphabet[:] #Copy of the alphetbet. There will be letters being removed on this list to show what letters have not been used. Constantly referring back to the first alphebet.
def removeduplicate(L1, L2): #This is a function that has 2 arguments.
L1Start = L1[:]# Have to make sure to make a copy of the first list, to be able to modify the second.
for e in L1:#For an element, or letter in the first argument.
if e in L1Start:#If the leter in LIStart is there, which also implies it is in the first argument
L2.remove(e) #then the letter is removed from the second argyment.
return ''.join(str(e) for e in L2)
return removeduplicate(lettersGuessed, alphabet2) #This will sshow what letters are available.
#Alphabet2 becomes L2. So the letter starts off from the alphabet and as each letter is guessed, it is removed from
#the list and then all the letters left are displayed.
# Hint: You might consider using string.ascii_lowercase, which
# is a string comprised of all lowercase letters.
# # When you've completed your function getAvailableLetters, uncomment these two lines
# # and run this file to test!
# lettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']
# print(getAvailableLetters(lettersGuessed))
# # Expected output:
# # abcdfghjlmnoqtuvwxyz
def hangman(secretWord):
'''
secretWord: string, the secret word to guess.
Starts up an interactive game of Hangman.
* At the start of the game, let the user know how many
letters the secretWord contains.
* Ask the user to supply one guess (i.e. letter) per round.
* The user should receive feedback immediately after each guess
about whether their guess appears in the computers word.
* After each round, you should also display to the user the
partially guessed word so far, as well as letters that the
user has not yet guessed.
Follows the other limitations detailed in the problem write-up.
'''
intro = str(len(secretWord)) #This allows the computer to know how long the secretWord is.
lettersGuessed = [] #This shows all the letters that have been guessed and will be places in here.
guess = str #turns the guess into a string and makes sure it is a string.
mistakesMade = 8 #This is how many attempts the player had. Every time they guess, the number will decrease by 1.
wordGuessed = False #
print('Welcome to the game, Hangman!')
print(('I am thinking of a word that is ') + intro + (' letters long.'))
print('------------')
#These are the instructions for the game.
while mistakesMade > 0 and mistakesMade <= 8 and wordGuessed is False: #Making sure the guesses are within the limit of guessing. You can only get up to 8 guesses for this one | '''
secretWord: string, the word the user is guessing | random_line_split |
hangman.py | loadWords()
def isWordGuessed(secretWord, lettersGuessed):
'''
secretWord: string, the word the user is guessing
lettersGuessed: list, what letters have been guessed so far
returns: boolean, True if all the letters of secretWord are in lettersGuessed;
False otherwise
'''
count = 0 # Count method takes a single arguent, element whose count is to be found. Starting at 0 letters.
for i, c in enumerate(secretWord): #enumerate is a built-in function of Python. It allos us to loop over something and have an automatic counter.
# Starting a loop to check if the guessed letter is inside the secretWord. "i" is the index, and "c" is the vlaue.
if c in lettersGuessed:#Another loop to check if the letter is correct. "c" (value) which is a letter in "lettersGuessed"
count += 1 #You add a number if there is a letter in the lettersguessed that is in the secretWord. This will force the loop to start over until the amount of letters in the secretWord.
if count == len(secretWord): #Once the loop is done. If the amount of counts equal the amount of letters in the secretWord
return True #Then you return True because the word was guessed correctly.
else: #if that does not happen
return False #Then you return false because the the person is missing letters to make the correct guess.
# When you've completed your function isWordGuessed, uncomment these three lines
# and run this file to test!
secretWord = 'apple'
lettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']
print(isWordGuessed(secretWord, lettersGuessed))
#You start off with count = 0, a is the first value, a is in lettersGuessed
#so the count becomes 1, then the next letter is p, the count becomes 2, and again p, the count becomes
#3, and then l, since there is no "l" in lettersGuessed, the count remains 3, then the next letter, "e"
#the count becomes 4. One the secretWord letters are done, you move onto if the count equals the secretWord count of letters
#then you get True. However, the count in this case is 4 and the secretWord has 5 letters, so the output is false.
# Expected output:
# False
def getGuessedWord(secretWord, lettersGuessed):
'''
secretWord: string, the word the user is guessing
lettersGuessed: list, what letters have been guessed so far
returns: string, comprised of letters and underscores that represents
what letters in secretWord have been guessed so far.
'''
count = 0 #Count method takes a single agument, element whose count is to be found. Starting a 0 letters, and each time it is a correct letter, the count will increase.
blank = ['_ '] * len(secretWord) #An underscore will be multiplied by how my letters in the secretWord.
for i, c in enumerate(secretWord):#Creaing a loop, "i" is the count and "c" is the value or letter in the secretWord.
if c in lettersGuessed:#If letter is in lettersGuesssed
count += 1 #The count will increase by 1 since the letter matches a letter in secretWorld.
blank.insert(count-1,c)#You are inserting the letter on the line of words. The the letter will be placed in the index number.
blank.pop(count) #pop() removees and returns the last item in the lsit. This will remove the underscore.
if count == len(secretWord):#Once all of the letters match the number of letter in the secretworkd
return ''.join(str(t) for t in blank) #then return join() merges the string representations of elements in sequence "e" into a string, with seperator string.
else: #if the letter is not in secretWrod
count += 1 #You add the guess as a count.
blank.insert(count-1,'_') #A blank will be placed in hold of the person not guessing the letter. The count-1, is for the index in the word. If the count is 4, then the placement of the letter is 3: 0,1,2,3.
blank.pop(count)#The pop removes the underscore that you need to get rid of because it created an extra index.
if count == len(secretWord):#Once the guessed letters match up to the amount of letter for secretWord you are done with the loop.
return ''.join(str(t) for t in blank) #this brings it all together.
# # When you've completed your function getGuessedWord, uncomment these three lines
# # and run this file to test!
secretWord = 'apple'
lettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']
print(getGuessedWord(secretWord, lettersGuessed))
#So, you start off with "a", it jumps to the else statement. It counts 1, and inserts a "_" for the first letter of the
#list of "_" because it is index 0. Then "p", count becomes 2 and will go with the first part of the function. It will add a
#"p" in the 1st index, and pop out a blank because a letter was inserted. You do this for every letter.
# # Expected output:
# # '_ pp_ e'
def getAvailableLetters(lettersGuessed):
'''
lettersGuessed: list, what letters have been guessed so far
returns: string, comprised of letters that represents what letters have not
yet been guessed.
'''
alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']# Every letter in the alphebet.
alphabet2 = alphabet[:] #Copy of the alphetbet. There will be letters being removed on this list to show what letters have not been used. Constantly referring back to the first alphebet.
def removeduplicate(L1, L2): #This is a function that has 2 arguments.
|
return removeduplicate(lettersGuessed, alphabet2) #This will sshow what letters are available.
#Alphabet2 becomes L2. So the letter starts off from the alphabet and as each letter is guessed, it is removed from
#the list and then all the letters left are displayed.
# Hint: You might consider using string.ascii_lowercase, which
# is a string comprised of all lowercase letters.
# # When you've completed your function getAvailableLetters, uncomment these two lines
# # and run this file to test!
# lettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']
# print(getAvailableLetters(lettersGuessed))
# # Expected output:
# # abcdfghjlmnoqtuvwxyz
def hangman(secretWord):
'''
secretWord: string, the secret word to guess.
Starts up an interactive game of Hangman.
* At the start of the game, let the user know how many
letters the secretWord contains.
* Ask the user to supply one guess (i.e. letter) per round.
* The user should receive feedback immediately after each guess
about whether their guess appears in the computers word.
* After each round, you should also display to the user the
partially guessed word so far, as well as letters that the
user has not yet guessed.
Follows the other limitations detailed in the problem write-up.
'''
intro = str(len(secretWord)) #This allows the computer to know how long the secretWord is.
lettersGuessed = [] #This shows all the letters that have been guessed and will be places in here.
guess = str #turns the guess into a string and makes sure it is a string.
mistakesMade = 8 #This is how many attempts the player had. Every time they guess, the number will decrease by 1.
wordGuessed = False #
print('Welcome to the game, Hangman!')
print(('I am thinking of a word that is ') + intro + (' letters long.'))
print('------------')
#These are the instructions for the game.
while mistakesMade > 0 and mistakesMade <= 8 and wordGuessed is False: #Making sure the guesses are within the limit of guessing. You can only get up to 8 guesses for this | L1Start = L1[:]# Have to make sure to make a copy of the first list, to be able to modify the second.
for e in L1:#For an element, or letter in the first argument.
if e in L1Start:#If the leter in LIStart is there, which also implies it is in the first argument
L2.remove(e) #then the letter is removed from the second argyment.
return ''.join(str(e) for e in L2) | identifier_body |
hangman.py | the lettersguessed that is in the secretWord. This will force the loop to start over until the amount of letters in the secretWord.
if count == len(secretWord): #Once the loop is done. If the amount of counts equal the amount of letters in the secretWord
return True #Then you return True because the word was guessed correctly.
else: #if that does not happen
return False #Then you return false because the the person is missing letters to make the correct guess.
# When you've completed your function isWordGuessed, uncomment these three lines
# and run this file to test!
secretWord = 'apple'
lettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']
print(isWordGuessed(secretWord, lettersGuessed))
#You start off with count = 0, a is the first value, a is in lettersGuessed
#so the count becomes 1, then the next letter is p, the count becomes 2, and again p, the count becomes
#3, and then l, since there is no "l" in lettersGuessed, the count remains 3, then the next letter, "e"
#the count becomes 4. One the secretWord letters are done, you move onto if the count equals the secretWord count of letters
#then you get True. However, the count in this case is 4 and the secretWord has 5 letters, so the output is false.
# Expected output:
# False
def getGuessedWord(secretWord, lettersGuessed):
    '''
    secretWord: string, the word the user is guessing
    lettersGuessed: list, what letters have been guessed so far
    returns: string, comprised of letters and underscores that represents
      what letters in secretWord have been guessed so far.

    Guessed letters appear as themselves; each unguessed letter appears as
    "_ " (underscore plus space), e.g. 'apple' with ['e', 'p'] guessed
    yields '_ pp_ e'.
    '''
    # Bug fix: the original built the string via insert()/pop() bookkeeping
    # and its else-branch inserted '_' WITHOUT the trailing space, producing
    # '_pp_e' instead of the documented expected output '_ pp_ e'.  A single
    # comprehension uses the same '_ ' placeholder for every miss.
    return ''.join(c if c in lettersGuessed else '_ ' for c in secretWord)
# # When you've completed your function getGuessedWord, uncomment these three lines
# # and run this file to test!
secretWord = 'apple'
lettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']
print(getGuessedWord(secretWord, lettersGuessed))
#So, you start off with "a", it jumps to the else statement. It counts 1, and inserts a "_" for the first letter of the
#list of "_" because it is index 0. Then "p", count becomes 2 and will go with the first part of the function. It will add a
#"p" in the 1st index, and pop out a blank because a letter was inserted. You do this for every letter.
# # Expected output:
# # '_ pp_ e'
def getAvailableLetters(lettersGuessed):
    '''
    lettersGuessed: list, what letters have been guessed so far
    returns: string, comprised of letters that represents what letters have not
      yet been guessed.

    Robustness fix: the original removed each guess from a copy of the
    alphabet with list.remove(), which raised ValueError as soon as the
    same letter was guessed twice (or a guess was not a lowercase letter).
    A membership filter handles both cases safely and preserves the
    alphabetical output order.
    '''
    import string  # local import keeps the function self-contained
    guessed = set(lettersGuessed)  # set gives O(1) lookups instead of list scans
    return ''.join(c for c in string.ascii_lowercase if c not in guessed)
#Alphabet2 becomes L2. So the letter starts off from the alphabet and as each letter is guessed, it is removed from
#the list and then all the letters left are displayed.
# Hint: You might consider using string.ascii_lowercase, which
# is a string comprised of all lowercase letters.
# # When you've completed your function getAvailableLetters, uncomment these two lines
# # and run this file to test!
# lettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']
# print(getAvailableLetters(lettersGuessed))
# # Expected output:
# # abcdfghjlmnoqtuvwxyz
def hangman(secretWord):
'''
secretWord: string, the secret word to guess.
Starts up an interactive game of Hangman.
* At the start of the game, let the user know how many
letters the secretWord contains.
* Ask the user to supply one guess (i.e. letter) per round.
* The user should receive feedback immediately after each guess
about whether their guess appears in the computers word.
* After each round, you should also display to the user the
partially guessed word so far, as well as letters that the
user has not yet guessed.
Follows the other limitations detailed in the problem write-up.
'''
intro = str(len(secretWord)) #This allows the computer to know how long the secretWord is.
lettersGuessed = [] #This shows all the letters that have been guessed and will be places in here.
guess = str #turns the guess into a string and makes sure it is a string.
mistakesMade = 8 #This is how many attempts the player had. Every time they guess, the number will decrease by 1.
wordGuessed = False #
print('Welcome to the game, Hangman!')
print(('I am thinking of a word that is ') + intro + (' letters long.'))
print('------------')
#These are the instructions for the game.
while mistakesMade > 0 and mistakesMade <= 8 and wordGuessed is False: #Making sure the guesses are within the limit of guessing. You can only get up to 8 guesses for this one.
if secretWord == getGuessedWord(secretWord, lettersGuessed):#If the secretWord is equal to the actual word through this function
wordGuessed = True #then the wordGuessed is True and you are done.
break
print(('You have ') + str(mistakesMade) + (' guesses left.'))
print(('Available letters: ') + getAvailableLetters(lettersGuessed))
#This tells the players how many guesses they have by inserting the number of how many guesses are left.
guess = input(('Please guess a letter: ').lower()) #this allows the user to guess a letter and make it into a lowercase just incase.
if guess in secretWord: #If the guess is in the secretWord it would move to the next two if statements.
if guess in lettersGuessed: #If letter has already been guessed
| print(("Oops! You've already guessed that letter: ") + getGuessedWord(secretWord, lettersGuessed))
print(('------------')) | conditional_block | |
hangman.py | ():
"""
Returns a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
print("Loading word list from file...")
# inFile: file
inFile = open(WORDLIST_FILENAME, 'r')
# line: string
line = inFile.readline()
# wordlist: list of strings
wordlist = line.split()
print(" ", len(wordlist), "words loaded.")
return wordlist
def chooseWord(wordlist):
"""
wordlist (list): list of words (strings)
Returns a word from wordlist at random
"""
return random.choice(wordlist)
# end of helper code
# -----------------------------------
# Load the list of words into the variable wordlist
# so that it can be accessed from anywhere in the program
wordlist = loadWords()
def isWordGuessed(secretWord, lettersGuessed):
    '''
    secretWord: string, the word the user is guessing
    lettersGuessed: list, what letters have been guessed so far
    returns: boolean, True if all the letters of secretWord are in lettersGuessed;
      False otherwise

    Idiom fix: the manual enumerate/counter loop is exactly the contract of
    the built-in all() — every letter of secretWord must appear in
    lettersGuessed.  Behaviour is identical, including the empty-word case
    (all() over an empty sequence is True, matching count == len('') == 0).
    '''
    return all(letter in lettersGuessed for letter in secretWord)
# When you've completed your function isWordGuessed, uncomment these three lines
# and run this file to test!
secretWord = 'apple'
lettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']
print(isWordGuessed(secretWord, lettersGuessed))
#You start off with count = 0, a is the first value, a is in lettersGuessed
#so the count becomes 1, then the next letter is p, the count becomes 2, and again p, the count becomes
#3, and then l, since there is no "l" in lettersGuessed, the count remains 3, then the next letter, "e"
#the count becomes 4. One the secretWord letters are done, you move onto if the count equals the secretWord count of letters
#then you get True. However, the count in this case is 4 and the secretWord has 5 letters, so the output is false.
# Expected output:
# False
def getGuessedWord(secretWord, lettersGuessed):
    '''
    secretWord: string, the word the user is guessing
    lettersGuessed: list, what letters have been guessed so far
    returns: string, comprised of letters and underscores that represents
      what letters in secretWord have been guessed so far.

    Guessed letters appear as themselves; each unguessed letter appears as
    "_ " (underscore plus space), e.g. 'apple' with ['e', 'p'] guessed
    yields '_ pp_ e'.
    '''
    # Bug fix: the original built the string via insert()/pop() bookkeeping
    # and its else-branch inserted '_' WITHOUT the trailing space, producing
    # '_pp_e' instead of the documented expected output '_ pp_ e'.  A single
    # comprehension uses the same '_ ' placeholder for every miss.
    return ''.join(c if c in lettersGuessed else '_ ' for c in secretWord)
# # When you've completed your function getGuessedWord, uncomment these three lines
# # and run this file to test!
secretWord = 'apple'
lettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']
print(getGuessedWord(secretWord, lettersGuessed))
#So, you start off with "a", it jumps to the else statement. It counts 1, and inserts a "_" for the first letter of the
#list of "_" because it is index 0. Then "p", count becomes 2 and will go with the first part of the function. It will add a
#"p" in the 1st index, and pop out a blank because a letter was inserted. You do this for every letter.
# # Expected output:
# # '_ pp_ e'
def getAvailableLetters(lettersGuessed):
    '''
    lettersGuessed: list, what letters have been guessed so far
    returns: string, comprised of letters that represents what letters have not
      yet been guessed.

    Robustness fix: the original removed each guess from a copy of the
    alphabet with list.remove(), which raised ValueError as soon as the
    same letter was guessed twice (or a guess was not a lowercase letter).
    A membership filter handles both cases safely and preserves the
    alphabetical output order.
    '''
    import string  # local import keeps the function self-contained
    guessed = set(lettersGuessed)  # set gives O(1) lookups instead of list scans
    return ''.join(c for c in string.ascii_lowercase if c not in guessed)
#Alphabet2 becomes L2. So the letter starts off from the alphabet and as each letter is guessed, it is removed from
#the list and then all the letters left are displayed.
# Hint: You might consider using string.ascii_lowercase, which
# is a string comprised of all lowercase letters.
# # When you've completed your function getAvailableLetters, uncomment these two lines
# # and run this file to test!
# lettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']
# print(getAvailableLetters(lettersGuessed))
# # Expected output:
# # abcdfghjlmnoqtuvwxyz
def hangman(secretWord):
'''
secretWord: string, the secret word to guess.
Starts up an interactive game of Hangman.
* At the start of the game, let the user know how many
letters the secretWord contains.
* Ask the user to supply one guess (i.e. letter) per round.
* The user should receive feedback immediately after each guess
about whether their guess appears in the computers word.
* After each round, you should also display to the user the
partially guessed word so far, as well as letters that the
user has not yet guessed.
Follows the other limitations detailed in the problem write-up.
'''
intro = str(len | loadWords | identifier_name | |
mn-map.component.ts | () color:string;
@Input() size:string;
@Input() data:any;
@Input() set geo_data(value){
if (value){
this.data = value;
this.parent.redraw();
}
}
@Output() datachange = new EventEmitter<any>();
constructor(@Inject(forwardRef(() => MarkerLayer)) private parent:MarkerLayer){}
addMarker(lyr){
let m = this.get_marker();
if (m != null){
lyr.addLayer(m);
m.openPopup();
}
}
get_marker(){
if (this.data == null){
if (this.lat !== undefined)
return L.marker([this.lat, this.lon]);
else return null;
} else {
if (this.data.geometry) {
if (this.data.geometry.coordinates[0] != 0) {
let pop = "<div><h3>"+this.data.properties.RagioneSociale+"</h3><p>"+this.data.properties.Indirizzo+", "+this.data.properties.Frazione + " "+this.data.properties.Comune+"</p></div>";
return L.marker(this.data.geometry.coordinates).bindPopup(pop).openPopup();
}
}
}
}
}
/**
* Marker Layer
* @param lon: Longitude of the marker
*/
@Directive({
selector: '[markers]',
})
export class MarkerLayer extends LeafLayerBase{
@Input() name:string;
@ContentChildren(Marker) dataLayers: QueryList<Marker>;
layer;
getLayer(){
this.layer = L.featureGroup();
this.redraw();
return this.layer;
}
redraw(){
this.layer.clearLayers();
this.dataLayers.forEach(element => {
element.addMarker(this.layer);
});
}
isBase(){
return false;
}
}
/**
* Tile Layer
* @param lon: Longitude of the marker
*/
@Directive({
selector: 'mapboxlayer',
})
export class MapboxLayer extends LeafLayerBase{
@Input() name:string;
@Input() owner:string;
@Input() id:string;
@Input() token:string;
@Input() minzoom:number = 1;
@Input() maxzoom:number = 20;
getLayer(){
let url = "https://api.mapbox.com/styles/v1/"+this.owner+"/"+this.id+"/tiles/256/{z}/{x}/{y}?access_token="+this.token;
console.log(url);
let attribution = "";
return L.tileLayer(url, {minZoom: this.minzoom, maxZoom: this.maxzoom, attribution: attribution});
}
isBase(){
return true;
}
}
/**
* Tile Layer
* @param lon: Longitude of the marker
*/
@Directive({
selector: 'tile_layer',
})
export class BaseLayer extends LeafLayerBase{
@Input() name:string;
@Input() url:string;
@Input() attribution:string;
@Input() minzoom:number = 1;
@Input() maxzoom:number = 20;
getLayer(){
return L.tileLayer(this.url, {minZoom: this.minzoom, maxZoom: this.maxzoom, attribution: this.attribution});
}
isBase(){
return true;
}
}
/**
* Standard Tile Layer
* @param name: one of "osm", "bing", "google", ""
*/
@Directive({
selector: 'namedlayer',
})
export class NamedLayer extends LeafLayerBase {
@Input() layer:string;
configs = {
osms:{name:"OpenStreetMap", url:"https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png", attribution:"Map data © <a href=\"http://openstreetmap.org\">OpenStreetMap</a> contributors", minzoom:1, maxzoom:19},
osm:{name:"OpenStreetMap", url:"http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png", attribution:"Map data © <a href=\"http://openstreetmap.org\">OpenStreetMap</a> contributors", minzoom:1, maxzoom:19},
positron:{name:"Carto Positron", url:"http://{s}.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png", attribution:'© <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors, © <a href="https://carto.com/attributions">CARTO</a>', minzoom:1, maxzoom:19},
darkmatter:{name:"Carto Positron", url:"http://{s}.basemaps.cartocdn.com/dark_all/{z}/{x}/{y}.png", attribution:'© <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors, © <a href="https://carto.com/attributions">CARTO</a>', minzoom:1, maxzoom:19},
};
getLayer(){
if(Object.keys(this.configs).indexOf(this.layer) >= 0){
let lyr = this.configs[this.layer];
return L.tileLayer(lyr.url, {minZoom: lyr.minzoom, maxZoom: lyr.maxzoom, attribution: lyr.attribution});
}
return null;
}
isBase(){
return true;
}
getName(){
if(this.layer in this.configs){
return this.configs[this.layer].name;
}
return "";
}
}
@Directive({
selector: 'datalayer',
})
export class DataLayer extends LeafLayerBase {
@Input() type:string;
@Input() mode:string;
@Input() src:string;
@Input() aggregator:string;
@Input() field:string;
@Input() basestyle:any={};
@Input() propertystyle:any={};
@Input() styledproperty:string;
@Output() areaclick = new EventEmitter<any>();
constructor(private http:Http){
super();
}
the_style(basestyle, styledproperty, propertystyle){
return function(feature){
let gstyle = basestyle;
let v = feature.properties[styledproperty];
let astyle = propertystyle[v];
Object.assign(gstyle, astyle);
return gstyle;
}
}
getLayer():Promise<L.Layer>{
if (this.type == "geojson")
return new Promise<L.Layer>((resolve, react) =>{
this.http.get(this.aggregator).toPromise().then(x=>{
console.log(x);
resolve(L.geoJSON(x.json(), {
style:this.the_style(this.basestyle, this.styledproperty, this.propertystyle),
onEachFeature:(feature, lyr) => {
lyr.on({
click:(e)=>{
this.areaclick.emit({
field:feature.properties[this.field],
feature:feature
});
}
});
}
}));
});
});
return null;
}
isBase(){
return false;
}
addToMap(m, bls, dls){
this.getLayer().then(x=>{
m.addLayer(x);
dls.push(x);
});
}
}
/**
* Tile Layer
* @param lon: Longitude of the marker
*/
@Directive({
selector: 'cityosbglayer',
})
export class CityOSBackgroundLayer extends LeafLayerBase{
@Input() conf:any;
name:string;
url:string;
attribution:string;
minzoom:number = 1;
maxzoom:number = 20;
ngOnInit(){
this.name = this.conf.name;
this.url = this.conf.url;
this.attribution = this.conf.attribution;
}
getLayer(){
return L.tileLayer(this.url, {minZoom: this.minzoom, maxZoom: this.maxzoom, attribution: this.attribution});
}
isBase(){
return true;
}
}
@Directive({
selector: 'cityoslayer',
})
export class CityOSLayer extends LeafLayerBase {
@Input() mappingSpace:number;
@Output() itemclick = new EventEmitter<any>();
items;
styles;
int_styles = {}
constructor(private bms:BackendManagerService){
|
getLayer():Promise<L.Layer>{
return new Promise<L.Layer>((resolve, react) =>{
this.bms.setPaging(false).setActiveApp("spaces/"+this.mappingSpace+"/styles").getAll().then(s=>{
this.styles = s;
for(let style of this.styles){
this.int_styles[style.slug] = style;
}
console.log(this.int_styles);
this.bms.setPaging(false).setActiveApp("spaces/"+this.mappingSpace+"/geolocations").getAll().then(x=>{
console.log(x);
let geoj = L.geoJSON(x , {
style:(feature =>{
return this.int_styles[feature.properties.types[0]];
}),
});
resolve( geoj );
});
});
});
}
isBase(){
return false;
}
addToMap(m, bls, dls){
this.getLayer().then(x=>{
m.addLayer(x);
dls["CityOS"] = x;
});
}
}
@Component({
selector: '[mn-map]',
templateUrl: './mn-map.component.html',
styleUrls: ['./mn-map.component.css'],
})
export class MnMapComponent {
private makeid() {
var text = "";
var possible = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz012345678 | super();
}
| identifier_body |
mn-map.component.ts | export abstract class LeafLayerBase implements LeafLayer{
abstract getLayer():L.Layer|Promise<L.Layer>;
abstract isBase():boolean;
protected name:string;
getName():string{
return this.name;
}
addToMap(m, bls, dls){
let l = this.getLayer();
m.addLayer(l);
if(this.isBase())
bls[this.getName()] = l;
else
dls[this.getName()] = l;
}
}
/**
* Marker for Marker Layer
* @param lon: Longitude of the marker
*/
/**
 * A single marker inside a MarkerLayer.
 *
 * Position comes either from the lat/lon inputs or from a GeoJSON-like
 * feature bound to `data`/`geo_data`.  Setting `geo_data` stores the
 * feature and asks the parent MarkerLayer to redraw itself.
 */
@Directive({
    selector: '[marker]',
})
export class Marker{
    @Input() lon:number;
    @Input() lat:number;
    @Input() icon:string;
    @Input() color:string;
    @Input() size:string;
    @Input() data:any;

    /** Stores the bound feature and triggers a redraw of the parent layer. */
    @Input() set geo_data(value){
        if (!value){
            return;
        }
        this.data = value;
        this.parent.redraw();
    }

    @Output() datachange = new EventEmitter<any>();

    // forwardRef breaks the circular reference between Marker and MarkerLayer.
    constructor(@Inject(forwardRef(() => MarkerLayer)) private parent:MarkerLayer){}

    /** Builds the Leaflet marker (if any) and attaches it to the given layer group. */
    addMarker(lyr){
        const marker = this.get_marker();
        if (marker == null){
            return;
        }
        lyr.addLayer(marker);
        marker.openPopup();
    }

    /**
     * Returns a Leaflet marker for this directive, or null/undefined when
     * there is nothing to show (no coordinates, or a feature whose first
     * coordinate is 0).
     *
     * NOTE(review): GeoJSON conventionally stores [lon, lat] while
     * L.marker expects [lat, lng] — confirm the backend already delivers
     * [lat, lng] in `data.geometry.coordinates`.
     */
    get_marker(){
        if (this.data == null){
            // Plain lat/lon mode: only build a marker when lat was provided.
            return this.lat !== undefined ? L.marker([this.lat, this.lon]) : null;
        }
        const geometry = this.data.geometry;
        if (geometry && geometry.coordinates[0] != 0) {
            const props = this.data.properties;
            const pop = "<div><h3>" + props.RagioneSociale + "</h3><p>" + props.Indirizzo + ", " + props.Frazione + " " + props.Comune + "</p></div>";
            return L.marker(geometry.coordinates).bindPopup(pop).openPopup();
        }
        // Falls through to undefined, matching the original behaviour.
    }
}
/**
* Marker Layer
* @param lon: Longitude of the marker
*/
/**
 * Overlay layer that collects its child Marker directives into a single
 * Leaflet feature group.
 */
@Directive({
    selector: '[markers]',
})
export class MarkerLayer extends LeafLayerBase{
    @Input() name:string;
    @ContentChildren(Marker) dataLayers: QueryList<Marker>;
    layer; // the L.featureGroup holding all child markers

    /** Creates the feature group and fills it from the current children. */
    getLayer(){
        this.layer = L.featureGroup();
        this.redraw();
        return this.layer;
    }

    /** Rebuilds the group from scratch; children call this when their data changes. */
    redraw(){
        this.layer.clearLayers();
        this.dataLayers.forEach(marker => marker.addMarker(this.layer));
    }

    /** Marker groups are overlays, not base layers. */
    isBase(){
        return false;
    }
}
/**
* Tile Layer
* @param lon: Longitude of the marker
*/
/**
 * Base tile layer backed by a Mapbox-hosted style.
 * The style is addressed by account owner + style id, authenticated with
 * an access token.
 */
@Directive({
    selector: 'mapboxlayer',
})
export class MapboxLayer extends LeafLayerBase{
    @Input() name:string;
    @Input() owner:string;  // Mapbox account that owns the style
    @Input() id:string;     // Mapbox style id
    @Input() token:string;  // Mapbox access token
    @Input() minzoom:number = 1;
    @Input() maxzoom:number = 20;

    /** Builds the Leaflet tile layer for the configured Mapbox style. */
    getLayer(){
        // Fix: removed the debug console.log(url) — it printed the full URL,
        // leaking the access token to the browser console.
        let url = "https://api.mapbox.com/styles/v1/"+this.owner+"/"+this.id+"/tiles/256/{z}/{x}/{y}?access_token="+this.token;
        // NOTE(review): attribution is left empty as in the original —
        // Mapbox terms normally require an attribution string; confirm.
        let attribution = "";
        return L.tileLayer(url, {minZoom: this.minzoom, maxZoom: this.maxzoom, attribution: attribution});
    }

    isBase(){
        return true;
    }
}
/**
* Tile Layer
* @param lon: Longitude of the marker
*/
/**
 * Generic base tile layer configured entirely from template inputs
 * (tile URL template, attribution, zoom bounds).
 */
@Directive({
    selector: 'tile_layer',
})
export class BaseLayer extends LeafLayerBase{
    @Input() name:string;
    @Input() url:string;          // tile URL template ({s}/{z}/{x}/{y})
    @Input() attribution:string;  // HTML attribution shown on the map
    @Input() minzoom:number = 1;
    @Input() maxzoom:number = 20;

    /** Builds the Leaflet tile layer from the configured inputs. */
    getLayer(){
        const options = {
            minZoom: this.minzoom,
            maxZoom: this.maxzoom,
            attribution: this.attribution,
        };
        return L.tileLayer(this.url, options);
    }

    isBase(){
        return true;
    }
}
/**
* Standard Tile Layer
* @param name: one of "osm", "bing", "google", ""
*/
/**
 * Standard, preconfigured tile layer chosen by a short name.
 * @param layer one of the keys of `configs`: "osm", "osms", "positron",
 *              "darkmatter".
 */
@Directive({
    selector: 'namedlayer',
})
export class NamedLayer extends LeafLayerBase {
    @Input() layer:string;
    configs = {
        osms:{name:"OpenStreetMap", url:"https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png", attribution:"Map data © <a href=\"http://openstreetmap.org\">OpenStreetMap</a> contributors", minzoom:1, maxzoom:19},
        osm:{name:"OpenStreetMap", url:"http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png", attribution:"Map data © <a href=\"http://openstreetmap.org\">OpenStreetMap</a> contributors", minzoom:1, maxzoom:19},
        positron:{name:"Carto Positron", url:"http://{s}.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png", attribution:'© <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors, © <a href="https://carto.com/attributions">CARTO</a>', minzoom:1, maxzoom:19},
        // Fix: this entry was mislabelled "Carto Positron" (copy-paste),
        // which collided with the positron entry in the name-keyed layer
        // registry built by LeafLayerBase.addToMap().
        darkmatter:{name:"Carto Dark Matter", url:"http://{s}.basemaps.cartocdn.com/dark_all/{z}/{x}/{y}.png", attribution:'© <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors, © <a href="https://carto.com/attributions">CARTO</a>', minzoom:1, maxzoom:19},
    };

    /** Returns the configured tile layer, or null for an unknown name. */
    getLayer(){
        // Consistency: use the same `in` lookup as getName() instead of
        // Object.keys(...).indexOf(...).
        if(this.layer in this.configs){
            let lyr = this.configs[this.layer];
            return L.tileLayer(lyr.url, {minZoom: lyr.minzoom, maxZoom: lyr.maxzoom, attribution: lyr.attribution});
        }
        return null;
    }

    isBase(){
        return true;
    }

    /** Display name of the selected config, or "" when the name is unknown. */
    getName(){
        if(this.layer in this.configs){
            return this.configs[this.layer].name;
        }
        return "";
    }
}
/**
 * Overlay layer that fetches a GeoJSON document over HTTP and renders it,
 * styling each feature from a base style plus per-property overrides and
 * re-emitting feature clicks through `areaclick`.
 */
@Directive({
    selector: 'datalayer',
})
export class DataLayer extends LeafLayerBase {
    @Input() type:string;           // currently only "geojson" is supported
    @Input() mode:string;
    @Input() src:string;
    @Input() aggregator:string;     // URL the GeoJSON document is fetched from
    @Input() field:string;          // feature property forwarded on click
    @Input() basestyle:any={};      // style applied to every feature
    @Input() propertystyle:any={};  // overrides keyed by the styledproperty value
    @Input() styledproperty:string; // feature property that selects the override
    @Output() areaclick = new EventEmitter<any>();

    constructor(private http:Http){
        super();
    }

    /**
     * Returns a per-feature Leaflet style function that merges the base
     * style with the override selected by the feature's styledproperty.
     */
    the_style(basestyle, styledproperty, propertystyle){
        return function(feature){
            let v = feature.properties[styledproperty];
            let astyle = propertystyle[v];
            // Fix: merge into a FRESH object.  The original did
            // Object.assign(basestyle, astyle), mutating the shared
            // basestyle input so one feature's override leaked into the
            // style of every feature rendered after it.
            return Object.assign({}, basestyle, astyle);
        }
    }

    /** Fetches the GeoJSON and resolves to a Leaflet layer; null for unknown types. */
    getLayer():Promise<L.Layer>{
        if (this.type == "geojson")
            return new Promise<L.Layer>((resolve, react) =>{
                // Fix: removed the leftover debug console.log(x).
                this.http.get(this.aggregator).toPromise().then(x=>{
                    resolve(L.geoJSON(x.json(), {
                        style:this.the_style(this.basestyle, this.styledproperty, this.propertystyle),
                        onEachFeature:(feature, lyr) => {
                            lyr.on({
                                click:(e)=>{
                                    // Surface the clicked feature (plus the configured field) to the host.
                                    this.areaclick.emit({
                                        field:feature.properties[this.field],
                                        feature:feature
                                    });
                                }
                            });
                        }
                    }));
                });
            });
        return null;
    }

    isBase(){
        return false;
    }

    addToMap(m, bls, dls){
        const pending = this.getLayer();
        if (pending == null){
            // Fix: getLayer() returns null for unsupported types; the
            // original called .then() on it and threw a TypeError.
            return;
        }
        pending.then(x=>{
            m.addLayer(x);
            // NOTE(review): sibling layers register as dls[name] = layer,
            // but this push() assumes dls is an array — confirm which shape
            // the caller actually passes.
            dls.push(x);
        });
    }
}
/**
* Tile Layer
* @param lon: Longitude of the marker
*/
/**
 * Tile base layer whose settings come from a single `conf` object
 * ({name, url, attribution}) instead of individual template inputs.
 */
@Directive({
    selector: 'cityosbglayer',
})
export class CityOSBackgroundLayer extends LeafLayerBase{
    @Input() conf:any;
    name:string;
    url:string;
    attribution:string;
    minzoom:number = 1;
    maxzoom:number = 20;

    /** Unpacks the conf object once the input binding is available. */
    ngOnInit(){
        const {name, url, attribution} = this.conf;
        this.name = name;
        this.url = url;
        this.attribution = attribution;
    }

    /** Builds the Leaflet tile layer from the unpacked configuration. */
    getLayer(){
        return L.tileLayer(this.url, {
            minZoom: this.minzoom,
            maxZoom: this.maxzoom,
            attribution: this.attribution,
        });
    }

    isBase(){
        return true;
    }
}
@Directive({
selector: 'cityoslayer',
})
export class CityOSLayer extends LeafLayerBase {
@Input() mappingSpace:number;
@Output() itemclick = new EventEmitter<any>();
items;
styles;
int_styles = {}
constructor(private bms:BackendManagerService){
super();
}
getLayer():Promise<L.Layer>{
return new Promise<L.Layer>((resolve, react) =>{
this.bms.setPaging(false).setActiveApp("spaces/"+this.mappingSpace+"/styles").getAll().then(s=>{
this.styles = s;
for(let style of this.styles){
this.int_styles[style.slug] = style;
}
console.log(this.int_styles);
this.bms.setPaging(false).setActiveApp("spaces/"+this.mappingSpace+"/geolocations").getAll().then(x=>{
console.log(x);
let ge | random_line_split | ||
mn-map.component.ts | () color:string;
@Input() size:string;
@Input() data:any;
@Input() set geo_data(value){
if (value){
this.data = value;
this.parent.redraw();
}
}
@Output() datachange = new EventEmitter<any>();
constructor(@Inject(forwardRef(() => MarkerLayer)) private parent:MarkerLayer){}
addMarker(lyr){
let m = this.get_marker();
if (m != null){
lyr.addLayer(m);
m.openPopup();
}
}
get_marker(){
if (this.data == null){
if (this.lat !== undefined)
return L.marker([this.lat, this.lon]);
else return null;
} else {
if (this.data.geometry) {
if (this.data.geometry.coordinates[0] != 0) {
let pop = "<div><h3>"+this.data.properties.RagioneSociale+"</h3><p>"+this.data.properties.Indirizzo+", "+this.data.properties.Frazione + " "+this.data.properties.Comune+"</p></div>";
return L.marker(this.data.geometry.coordinates).bindPopup(pop).openPopup();
}
}
}
}
}
/**
* Marker Layer
* @param lon: Longitude of the marker
*/
@Directive({
selector: '[markers]',
})
export class MarkerLayer extends LeafLayerBase{
@Input() name:string;
@ContentChildren(Marker) dataLayers: QueryList<Marker>;
layer;
getLayer(){
this.layer = L.featureGroup();
this.redraw();
return this.layer;
}
redraw(){
this.layer.clearLayers();
this.dataLayers.forEach(element => {
element.addMarker(this.layer);
});
}
isBase(){
return false;
}
}
/**
* Tile Layer
* @param lon: Longitude of the marker
*/
@Directive({
selector: 'mapboxlayer',
})
export class MapboxLayer extends LeafLayerBase{
@Input() name:string;
@Input() owner:string;
@Input() id:string;
@Input() token:string;
@Input() minzoom:number = 1;
@Input() maxzoom:number = 20;
getLayer(){
let url = "https://api.mapbox.com/styles/v1/"+this.owner+"/"+this.id+"/tiles/256/{z}/{x}/{y}?access_token="+this.token;
console.log(url);
let attribution = "";
return L.tileLayer(url, {minZoom: this.minzoom, maxZoom: this.maxzoom, attribution: attribution});
}
isBase(){
return true;
}
}
/**
* Tile Layer
* @param lon: Longitude of the marker
*/
@Directive({
selector: 'tile_layer',
})
export class BaseLayer extends LeafLayerBase{
@Input() name:string;
@Input() url:string;
@Input() attribution:string;
@Input() minzoom:number = 1;
@Input() maxzoom:number = 20;
getLayer(){
return L.tileLayer(this.url, {minZoom: this.minzoom, maxZoom: this.maxzoom, attribution: this.attribution});
}
isBase(){
return true;
}
}
/**
* Standard Tile Layer
* @param name: one of "osm", "bing", "google", ""
*/
@Directive({
selector: 'namedlayer',
})
export class NamedLayer extends LeafLayerBase {
@Input() layer:string;
configs = {
osms:{name:"OpenStreetMap", url:"https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png", attribution:"Map data © <a href=\"http://openstreetmap.org\">OpenStreetMap</a> contributors", minzoom:1, maxzoom:19},
osm:{name:"OpenStreetMap", url:"http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png", attribution:"Map data © <a href=\"http://openstreetmap.org\">OpenStreetMap</a> contributors", minzoom:1, maxzoom:19},
positron:{name:"Carto Positron", url:"http://{s}.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png", attribution:'© <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors, © <a href="https://carto.com/attributions">CARTO</a>', minzoom:1, maxzoom:19},
darkmatter:{name:"Carto Positron", url:"http://{s}.basemaps.cartocdn.com/dark_all/{z}/{x}/{y}.png", attribution:'© <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors, © <a href="https://carto.com/attributions">CARTO</a>', minzoom:1, maxzoom:19},
};
getLayer(){
if(Object.keys(this.configs).indexOf(this.layer) >= 0){
let lyr = this.configs[this.layer];
return L.tileLayer(lyr.url, {minZoom: lyr.minzoom, maxZoom: lyr.maxzoom, attribution: lyr.attribution});
}
return null;
}
isBase(){
return true;
}
getName(){
if(this.layer in this.configs){
return this.configs[this.layer].name;
}
return "";
}
}
@Directive({
selector: 'datalayer',
})
export class DataLayer extends LeafLayerBase {
@Input() type:string;
@Input() mode:string;
@Input() src:string;
@Input() aggregator:string;
@Input() field:string;
@Input() basestyle:any={};
@Input() propertystyle:any={};
@Input() styledproperty:string;
@Output() areaclick = new EventEmitter<any>();
constructor(private http:Http){
super();
}
the_style(basestyle, styledproperty, propertystyle){
return function(feature){
let gstyle = basestyle;
let v = feature.properties[styledproperty];
let astyle = propertystyle[v];
Object.assign(gstyle, astyle);
return gstyle;
}
}
getLayer():Promise<L.Layer>{
if (this.type == "geojson")
return new Promise<L.Layer>((resolve, react) =>{
this.http.get(this.aggregator).toPromise().then(x=>{
console.log(x);
resolve(L.geoJSON(x.json(), {
style:this.the_style(this.basestyle, this.styledproperty, this.propertystyle),
onEachFeature:(feature, lyr) => {
lyr.on({
click:(e)=>{
this.areaclick.emit({
field:feature.properties[this.field],
feature:feature
});
}
});
}
}));
});
});
return null;
}
isBase(){
return false;
}
addToMap(m, bls, dls){
this.getLayer().then(x=>{
m.addLayer(x);
dls.push(x);
});
}
}
/**
* Tile Layer
* @param lon: Longitude of the marker
*/
@Directive({
selector: 'cityosbglayer',
})
export class CityOSBackgroundLayer extends LeafLayerBase{
@Input() conf:any;
name:string;
url:string;
attribution:string;
minzoom:number = 1;
maxzoom:number = 20;
ngOnInit(){
this.name = this.conf.name;
this.url = this.conf.url;
this.attribution = this.conf.attribution;
}
getLayer(){
return L.tileLayer(this.url, {minZoom: this.minzoom, maxZoom: this.maxzoom, attribution: this.attribution});
}
isBase(){
return true;
}
}
@Directive({
selector: 'cityoslayer',
})
export class CityOSLayer extends LeafLayerBase {
@Input() mappingSpace:number;
@Output() itemclick = new EventEmitter<any>();
items;
styles;
int_styles = {}
constructor(private bms:BackendManagerService){
super();
}
getLayer():Promise<L.Layer>{
return new Promise<L.Layer>((resolve, react) =>{
this.bms.setPaging(false).setActiveApp("spaces/"+this.mappingSpace+"/styles").getAll().then(s=>{
this.styles = s;
for(let style of this.styles){
this.int_styles[style.slug] = style;
}
console.log(this.int_styles);
this.bms.setPaging(false).setActiveApp("spaces/"+this.mappingSpace+"/geolocations").getAll().then(x=>{
console.log(x);
let geoj = L.geoJSON(x , {
style:(feature =>{
return this.int_styles[feature.properties.types[0]];
}),
});
resolve( geoj );
});
});
});
}
is | {
return false;
}
addToMap(m, bls, dls){
this.getLayer().then(x=>{
m.addLayer(x);
dls["CityOS"] = x;
});
}
}
@Component({
selector: '[mn-map]',
templateUrl: './mn-map.component.html',
styleUrls: ['./mn-map.component.css'],
})
export class MnMapComponent {
private makeid() {
var text = "";
var possible = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz012345678 | Base() | identifier_name |
world.py | `-. `./ / |/_.'")
print(" | |//")
print(" |_ /")
print(" |- |")
print(" | =|")
print(" | |")
print(" --------------------/ , . \\--------._")
print("\n This is a very boring part of the forest. Fuck all happens here")
class VictoryTile(MapTile):
def modify_player(self, player):
player.victory = True
exit()
def intro_text(self):
print("\n .''.")
print(" .''. *''* :_\/_: .")
print(" :_\/_: . .:.*_\/_* : /\ : .'.:.'.")
print(" .''.: /\ : _\(/_ ':'* /\ * : '..'. -=:o:=-")
print(" :_\/_:'.:::. /)\*''* .|.* '.\'/.'_\(/_'.':'.'")
print(" : /\ : ::::: '*_\/_* | | -= o =- /)\ ' *")
print(" '..' ':::' * /\ * |'| .'/.\'. '._____")
print(" * __*..* | | : |. |' .---\"|")
print(" _* .-' '-. | | .--'| || | _| |")
print(" .-'| _.| | || '-__ | | | || |")
print(" |' | |. | || | | | | || |")
print(" ____| '-' ' "" '-' '-.' '` |____")
print(" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ")
print("\n You Saved \"The Girl\"!")
print("\n You whisk her unto your arms and dissaper in to the sunset!")
print(" Lets hope she makes it worth your while ;)")
print("\n\n Thanks for playing the game!")
class EnemyTile(MapTile):
def __init__(self, x, y):
encounter_type = random.random()
if encounter_type < 0.30:
self.enemy = enemies.GiantSpider()
self.alive_text = "\nA giant spider jumps down from " \
"its web and lands right in front " \
"of you!"
self.dead_text = "\nThe lifeless corpse of the spider " \
"slumps in the corner. Creepy."
elif encounter_type < 0.60:
self.enemy = enemies.Goblin()
self.alive_text = "\nA nasty lttile goblin leaps out at you" \
"and waves his stabby daggar at you!"
self.dead_text = "\nThe gblin exploded all over the walls." \
"I'm not cleaning that up.'"
elif encounter_type < 0.80:
self.enemy = enemies.Ogre()
self.alive_text = "\nA ogre blocks your path!"
self.dead_text = "\nThe oger died convinietly off of " \
"the path, out of the way."
elif encounter_type < 0.95:
self.enemy = enemies.BatColony()
self.alive_text = "\nBats. Eeshk..."
self.dead_text = "\nThe furry bastards are dead"
else:
self.enemy = enemies.RockMonster()
self.alive_text = "\nIs it a bird? Is it a plane? no " \
"it's a rock monster!"
self.dead_text = "\nYou killed a rock. " \
"Now thats dedication!!"
self.enemy.hp = self.enemy.randomise_stats(self.enemy.hp)
self.enemy.damage = self.enemy.randomise_stats(self.enemy.damage)
self.enemy.loot = self.enemy.randomise_stats(self.enemy.loot)
super().__init__(x, y)
def intro_text(self):
if self.enemy.is_alive():
print("\n ___________.___ ________ ___ ___ ___________._.")
print(" \\_ _____/| | / _____/ / | \\ \\__ ___/| |")
print(" | __) | |/ \\ ___ / ~ \\ | | | |")
print(" | \\ | |\\ \\_\\ \\\\ Y / | | \\|")
print(" \\___ / |___| \\______ / \\___|_ / |____| __")
print(" \\/ \\/ \\/ \\/")
print(self.alive_text)
print("{} has {} HP".format(self.enemy.name, self.enemy.hp))
else:
print(" ____ ____.___ _________ ___________________ __________ _____.___.._.")
print(" \\ \\ / /| |\\_ ___ \\ \\__ ___/\\_____ \\ \\______ \\\\__ | || |")
print(" \\ Y / | |/ \\ \\/ | | / | \\ | _/ / | || |")
print(" \\ / | |\\ \\____ | | / | \\ | | \\ \\____ | \\|")
print(" \\___/ |___| \\______ / |____| \\_______ / |____|_ / / ______| __")
print(" \\/ \\/ \\/ \\/ \\/")
print(self.dead_text)
def modify_player(self, player):
|
class TraderTile(MapTile):
def __init__(self, x, y):
self.trader = npc.Trader()
super().__init__(x, y)
def trade(self, buyer, seller):
if buyer.name == "Trader":
action = "sell"
buyer_char = "Trader"
else:
action = "buy"
buyer_char = "Player"
seller_inventory = []
item_choice = None
for item_cat, item_attr in seller.inventory['Items'].items():
if item_attr["qty"] > 0:
seller_inventory.append(item_attr["obj"])
print("\nTrading Items")
print("----------------\n")
if not seller_inventory:
print("There are no items to sell!")
else:
for i, item in enumerate(seller_inventory, 1):
print(" {}: {}".format(i, item.name))
print("\nq: Cancel trade")
while item_choice not in seller_inventory:
item_choice = input("\nWhich item do you want to {}? ".format(action))
if item_choice in ['Q', 'q']:
if buyer_char == "Player":
buyer.room.visited = 0
else:
seller.room.visited = 0
return
else:
try:
to_swap = seller_inventory[int(item_choice) - 1]
self.swap(seller, buyer, to_swap)
except (ValueError, IndexError):
print("Invalid choice!")
def swap(self, seller, buyer, item):
if buyer.name == "Trader":
action = "sell"
buyer_char = "Trader"
else:
action = "buy"
buyer_char = "Player"
if item.value > buyer.gold:
print("That's too expensive!")
self.trade(buyer, seller)
for item_cat, item_attr in seller.inventory['Items'].items():
if item_cat == item.name:
item_attr["qty"] -= 1
if item.name in buyer.inventory['Items'].items():
for item_cat, item_attr in buyer.inventory['Items'].items():
if item_cat == item.name:
item_attr["qty"] += 1
else:
buyer.inventory['Items'][item.name] = {}
buyer.inventory['Items'][item.name]['obj'] = item
buyer.inventory['Items'][item.name]['qty'] = 1
seller.gold = seller.gold + item.value
buyer.gold = buyer.gold - item.value
print("Trade complete!")
def check_if_trade(self, player):
while True:
if len(self.trader.inventory) == 0:
print("No items to trade!")
player.room.visited = 0
return
user_input = input("Would you like to (B)uy, (S | if self.enemy.is_alive():
dex_mod = decimal.Decimal(player.dex_stat / 100)
dodge_chance = decimal.Decimal(random.random()) * dex_mod
miss_chance = decimal.Decimal(random.random()) * dex_mod
if miss_chance > 0.98:
print("The {} missed!".format(self.enemy.name))
elif dodge_chance > 0.98:
print("You dodged the attack!")
else:
def_mod = decimal.Decimal(2 - (player.def_stat / 100))
enemy_damage = round(self.enemy.damage * def_mod, 0)
player.curr_hp -= enemy_damage
print("The {} does {} damage. You have {} HP remaining."
.format(self.enemy.name, enemy_damage, player.curr_hp)) | identifier_body |
world.py | (self):
print("\n v . ._, |_ .,")
print(" `-._\\/ . \\ / |/_")
print(" \\ _\\, y | \\//")
print(" _\\_.___\\, \\/ -.\\||")
print(" `7-,--.`._|| / / ,")
print(" /' `-. `./ / |/_.'")
print(" | |//")
print(" |_ /")
print(" |- |")
print(" | =|")
print(" | |")
print(" --------------------/ , . \\--------._")
print("\n This is a very boring part of the forest. Fuck all happens here")
class VictoryTile(MapTile):
def modify_player(self, player):
player.victory = True
exit()
def intro_text(self):
print("\n .''.")
print(" .''. *''* :_\/_: .")
print(" :_\/_: . .:.*_\/_* : /\ : .'.:.'.")
print(" .''.: /\ : _\(/_ ':'* /\ * : '..'. -=:o:=-")
print(" :_\/_:'.:::. /)\*''* .|.* '.\'/.'_\(/_'.':'.'")
print(" : /\ : ::::: '*_\/_* | | -= o =- /)\ ' *")
print(" '..' ':::' * /\ * |'| .'/.\'. '._____")
print(" * __*..* | | : |. |' .---\"|")
print(" _* .-' '-. | | .--'| || | _| |")
print(" .-'| _.| | || '-__ | | | || |")
print(" |' | |. | || | | | | || |")
print(" ____| '-' ' "" '-' '-.' '` |____")
print(" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ")
print("\n You Saved \"The Girl\"!")
print("\n You whisk her unto your arms and dissaper in to the sunset!")
print(" Lets hope she makes it worth your while ;)")
print("\n\n Thanks for playing the game!")
class EnemyTile(MapTile):
def __init__(self, x, y):
encounter_type = random.random()
if encounter_type < 0.30:
self.enemy = enemies.GiantSpider()
self.alive_text = "\nA giant spider jumps down from " \
"its web and lands right in front " \
"of you!"
self.dead_text = "\nThe lifeless corpse of the spider " \
"slumps in the corner. Creepy."
elif encounter_type < 0.60:
self.enemy = enemies.Goblin()
self.alive_text = "\nA nasty lttile goblin leaps out at you" \
"and waves his stabby daggar at you!"
self.dead_text = "\nThe gblin exploded all over the walls." \
"I'm not cleaning that up.'"
elif encounter_type < 0.80:
self.enemy = enemies.Ogre()
self.alive_text = "\nA ogre blocks your path!"
self.dead_text = "\nThe oger died convinietly off of " \
"the path, out of the way."
elif encounter_type < 0.95:
self.enemy = enemies.BatColony()
self.alive_text = "\nBats. Eeshk..."
self.dead_text = "\nThe furry bastards are dead"
else:
self.enemy = enemies.RockMonster()
self.alive_text = "\nIs it a bird? Is it a plane? no " \
"it's a rock monster!"
self.dead_text = "\nYou killed a rock. " \
"Now thats dedication!!"
self.enemy.hp = self.enemy.randomise_stats(self.enemy.hp)
self.enemy.damage = self.enemy.randomise_stats(self.enemy.damage)
self.enemy.loot = self.enemy.randomise_stats(self.enemy.loot)
super().__init__(x, y)
def intro_text(self):
if self.enemy.is_alive():
print("\n ___________.___ ________ ___ ___ ___________._.")
print(" \\_ _____/| | / _____/ / | \\ \\__ ___/| |")
print(" | __) | |/ \\ ___ / ~ \\ | | | |")
print(" | \\ | |\\ \\_\\ \\\\ Y / | | \\|")
print(" \\___ / |___| \\______ / \\___|_ / |____| __")
print(" \\/ \\/ \\/ \\/")
print(self.alive_text)
print("{} has {} HP".format(self.enemy.name, self.enemy.hp))
else:
print(" ____ ____.___ _________ ___________________ __________ _____.___.._.")
print(" \\ \\ / /| |\\_ ___ \\ \\__ ___/\\_____ \\ \\______ \\\\__ | || |")
print(" \\ Y / | |/ \\ \\/ | | / | \\ | _/ / | || |")
print(" \\ / | |\\ \\____ | | / | \\ | | \\ \\____ | \\|")
print(" \\___/ |___| \\______ / |____| \\_______ / |____|_ / / ______| __")
print(" \\/ \\/ \\/ \\/ \\/")
print(self.dead_text)
def modify_player(self, player):
if self.enemy.is_alive():
dex_mod = decimal.Decimal(player.dex_stat / 100)
dodge_chance = decimal.Decimal(random.random()) * dex_mod
miss_chance = decimal.Decimal(random.random()) * dex_mod
if miss_chance > 0.98:
print("The {} missed!".format(self.enemy.name))
elif dodge_chance > 0.98:
print("You dodged the attack!")
else:
def_mod = decimal.Decimal(2 - (player.def_stat / 100))
enemy_damage = round(self.enemy.damage * def_mod, 0)
player.curr_hp -= enemy_damage
print("The {} does {} damage. You have {} HP remaining."
.format(self.enemy.name, enemy_damage, player.curr_hp))
class TraderTile(MapTile):
def __init__(self, x, y):
self.trader = npc.Trader()
super().__init__(x, y)
def trade(self, buyer, seller):
if buyer.name == "Trader":
action = "sell"
buyer_char = "Trader"
else:
action = "buy"
buyer_char = "Player"
seller_inventory = []
item_choice = None
for item_cat, item_attr in seller.inventory['Items'].items():
if item_attr["qty"] > 0:
seller_inventory.append(item_attr["obj"])
print("\nTrading Items")
print("----------------\n")
if not seller_inventory:
print("There are no items to sell!")
else:
for i, item in enumerate(seller_inventory, 1):
print(" {}: {}".format(i, item.name))
print("\nq: Cancel trade")
while item_choice not in seller_inventory:
item_choice = input("\nWhich item do you want to {}? ".format(action))
if item_choice in ['Q', 'q']:
if buyer_char == "Player":
buyer.room.visited = 0
else:
seller.room.visited = 0
return
else:
try:
to_swap = seller_inventory[int(item_choice) - 1]
self.swap(seller, buyer, to_swap)
except (ValueError, IndexError):
print("Invalid choice!")
def swap(self, seller, buyer, item):
if buyer.name == "Trader":
action = "sell"
buyer_char = "Trader"
else:
action = "buy"
buyer_char = "Player"
if item.value > buyer.gold:
print("That's too expensive!")
self.trade(buyer, seller)
for item_cat, item_attr in seller.inventory['Items'].items():
if item_cat == item.name:
item_attr["qty"] -= 1
if item.name in buyer.inventory['Items'].items():
for item_cat, item_attr in buyer.inventory['Items'].items():
if item_cat == item.name:
item_attr["qty"] += 1
else:
buyer.inventory['Items'][item.name] = {}
buyer.inventory['Items'][item.name]['obj'] = item
buyer.inventory['Items'][item.name]['qty'] = 1
| intro_text | identifier_name | |
world.py | if self.enemy.is_alive():
print("\n ___________.___ ________ ___ ___ ___________._.")
print(" \\_ _____/| | / _____/ / | \\ \\__ ___/| |")
print(" | __) | |/ \\ ___ / ~ \\ | | | |")
print(" | \\ | |\\ \\_\\ \\\\ Y / | | \\|")
print(" \\___ / |___| \\______ / \\___|_ / |____| __")
print(" \\/ \\/ \\/ \\/")
print(self.alive_text)
print("{} has {} HP".format(self.enemy.name, self.enemy.hp))
else:
print(" ____ ____.___ _________ ___________________ __________ _____.___.._.")
print(" \\ \\ / /| |\\_ ___ \\ \\__ ___/\\_____ \\ \\______ \\\\__ | || |")
print(" \\ Y / | |/ \\ \\/ | | / | \\ | _/ / | || |")
print(" \\ / | |\\ \\____ | | / | \\ | | \\ \\____ | \\|")
print(" \\___/ |___| \\______ / |____| \\_______ / |____|_ / / ______| __")
print(" \\/ \\/ \\/ \\/ \\/")
print(self.dead_text)
def modify_player(self, player):
if self.enemy.is_alive():
dex_mod = decimal.Decimal(player.dex_stat / 100)
dodge_chance = decimal.Decimal(random.random()) * dex_mod
miss_chance = decimal.Decimal(random.random()) * dex_mod
if miss_chance > 0.98:
print("The {} missed!".format(self.enemy.name))
elif dodge_chance > 0.98:
print("You dodged the attack!")
else:
def_mod = decimal.Decimal(2 - (player.def_stat / 100))
enemy_damage = round(self.enemy.damage * def_mod, 0)
player.curr_hp -= enemy_damage
print("The {} does {} damage. You have {} HP remaining."
.format(self.enemy.name, enemy_damage, player.curr_hp))
class TraderTile(MapTile):
def __init__(self, x, y):
self.trader = npc.Trader()
super().__init__(x, y)
def trade(self, buyer, seller):
if buyer.name == "Trader":
action = "sell"
buyer_char = "Trader"
else:
action = "buy"
buyer_char = "Player"
seller_inventory = []
item_choice = None
for item_cat, item_attr in seller.inventory['Items'].items():
if item_attr["qty"] > 0:
seller_inventory.append(item_attr["obj"])
print("\nTrading Items")
print("----------------\n")
if not seller_inventory:
print("There are no items to sell!")
else:
for i, item in enumerate(seller_inventory, 1):
print(" {}: {}".format(i, item.name))
print("\nq: Cancel trade")
while item_choice not in seller_inventory:
item_choice = input("\nWhich item do you want to {}? ".format(action))
if item_choice in ['Q', 'q']:
if buyer_char == "Player":
buyer.room.visited = 0
else:
seller.room.visited = 0
return
else:
try:
to_swap = seller_inventory[int(item_choice) - 1]
self.swap(seller, buyer, to_swap)
except (ValueError, IndexError):
print("Invalid choice!")
def swap(self, seller, buyer, item):
if buyer.name == "Trader":
action = "sell"
buyer_char = "Trader"
else:
action = "buy"
buyer_char = "Player"
if item.value > buyer.gold:
print("That's too expensive!")
self.trade(buyer, seller)
for item_cat, item_attr in seller.inventory['Items'].items():
if item_cat == item.name:
item_attr["qty"] -= 1
if item.name in buyer.inventory['Items'].items():
for item_cat, item_attr in buyer.inventory['Items'].items():
if item_cat == item.name:
item_attr["qty"] += 1
else:
buyer.inventory['Items'][item.name] = {}
buyer.inventory['Items'][item.name]['obj'] = item
buyer.inventory['Items'][item.name]['qty'] = 1
seller.gold = seller.gold + item.value
buyer.gold = buyer.gold - item.value
print("Trade complete!")
def check_if_trade(self, player):
while True:
if len(self.trader.inventory) == 0:
print("No items to trade!")
player.room.visited = 0
return
user_input = input("Would you like to (B)uy, (S)ell, or (Q)uit?: ")
if user_input in ['Q', 'q']:
player.room.visited = 0
return
elif user_input in ['B', 'b']:
print("Here's whats available to buy:\n")
self.trade(buyer=player, seller=self.trader)
elif user_input in ['S', 's']:
print("Here's whats available to sell:\n")
self.trade(buyer=self.trader, seller=player)
else:
print("Invalid choice!")
def intro_text(self):
print("\n _________##")
print(" @\\\\\\\\\\\\\\\\\\##")
print(" @@@\\\\\\\\\\\\\\\\##\\")
print(" @@ @@\\\\\\\\\\\\\\\\\\\\\\")
print(" @@@@@@@\\\\\\\\\\\\\\\\\\\\\\")
print(" @@@@@@@@@----------|")
print(" @@ @@@ @@__________|")
print(" @@@@@@@@@__________|")
print(" @@@@ .@@@__________|")
print(" _\|/__@@@@__@@@__________|__")
print("\n Trading Post")
print("\n Press \"T\" to trade")
class FindGoldTile(MapTile):
def __init__(self, x, y):
self.gold = random.randint(20, 75)
self.gold_claimed = False
super().__init__(x, y)
def modify_player(self, player):
if not self.gold_claimed:
self.gold_claimed = True
luc_mod = decimal.Decimal(player.luc_stat / 100)
found_loot = round(self.gold * luc_mod, 0)
player.gold += found_loot
print("\n You found {} gold coins!".format(found_loot))
def intro_text(self):
print("\n |#######=====================#######|")
print(" |#(1)*UNITED STATES OF WHAYEVER*(1)#|")
print(" |#** /===\ ******** **#|")
print(" |*# {G} | (\") | #*|")
print(" |#* ****** | /v\ | O N E *#|")
print(" |#(1) \===/ (1)#|")
print(" |##===========SOME GOLD===========##|")
if self.gold_claimed:
print("\n You've already looted this place!")
else:
print("\n Someone dropped some gold. You pick it up.")
start_tile_location = None
tile_type_dict = {"VT": VictoryTile,
"EN": EnemyTile,
"ST": StartTile,
"NA": BoringTile,
"FG": FindGoldTile,
"TT": TraderTile,
" ": None}
def is_dsl_valid(dsl):
if dsl.count("|ST|") != 1:
return False
if dsl.count("|VT|") == 0:
return False
lines = dsl.splitlines()
lines = [l for l in lines if l]
pipe_counts = [line.count("|") for line in lines]
for count in pipe_counts:
if count != pipe_counts[0]:
return False
return True
def parse_world_dsl(map_file):
world_map = []
level_map = open(map_file, 'r').read()
if not is_dsl_valid(level_map):
sys.exit("Rumtime error: unable to parse map file")
dsl_lines = level_map.splitlines()
dsl_lines = [x for x in dsl_lines if x]
for y, dsl_row in enumerate(dsl_lines):
row = []
dsl_cells = dsl_row.split("|")
dsl_cells = [c for c in dsl_cells if c]
for x, dsl_cell in enumerate(dsl_cells):
if dsl_cell not in tile_type_dict:
sys.exit("Map parse error: Invalid room type in map") | break
| random_line_split | |
world.py | This is a very boring part of the forest. Fuck all happens here")
class VictoryTile(MapTile):
def modify_player(self, player):
player.victory = True
exit()
def intro_text(self):
print("\n .''.")
print(" .''. *''* :_\/_: .")
print(" :_\/_: . .:.*_\/_* : /\ : .'.:.'.")
print(" .''.: /\ : _\(/_ ':'* /\ * : '..'. -=:o:=-")
print(" :_\/_:'.:::. /)\*''* .|.* '.\'/.'_\(/_'.':'.'")
print(" : /\ : ::::: '*_\/_* | | -= o =- /)\ ' *")
print(" '..' ':::' * /\ * |'| .'/.\'. '._____")
print(" * __*..* | | : |. |' .---\"|")
print(" _* .-' '-. | | .--'| || | _| |")
print(" .-'| _.| | || '-__ | | | || |")
print(" |' | |. | || | | | | || |")
print(" ____| '-' ' "" '-' '-.' '` |____")
print(" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ")
print("\n You Saved \"The Girl\"!")
print("\n You whisk her unto your arms and dissaper in to the sunset!")
print(" Lets hope she makes it worth your while ;)")
print("\n\n Thanks for playing the game!")
class EnemyTile(MapTile):
def __init__(self, x, y):
encounter_type = random.random()
if encounter_type < 0.30:
self.enemy = enemies.GiantSpider()
self.alive_text = "\nA giant spider jumps down from " \
"its web and lands right in front " \
"of you!"
self.dead_text = "\nThe lifeless corpse of the spider " \
"slumps in the corner. Creepy."
elif encounter_type < 0.60:
self.enemy = enemies.Goblin()
self.alive_text = "\nA nasty lttile goblin leaps out at you" \
"and waves his stabby daggar at you!"
self.dead_text = "\nThe gblin exploded all over the walls." \
"I'm not cleaning that up.'"
elif encounter_type < 0.80:
self.enemy = enemies.Ogre()
self.alive_text = "\nA ogre blocks your path!"
self.dead_text = "\nThe oger died convinietly off of " \
"the path, out of the way."
elif encounter_type < 0.95:
self.enemy = enemies.BatColony()
self.alive_text = "\nBats. Eeshk..."
self.dead_text = "\nThe furry bastards are dead"
else:
self.enemy = enemies.RockMonster()
self.alive_text = "\nIs it a bird? Is it a plane? no " \
"it's a rock monster!"
self.dead_text = "\nYou killed a rock. " \
"Now thats dedication!!"
self.enemy.hp = self.enemy.randomise_stats(self.enemy.hp)
self.enemy.damage = self.enemy.randomise_stats(self.enemy.damage)
self.enemy.loot = self.enemy.randomise_stats(self.enemy.loot)
super().__init__(x, y)
def intro_text(self):
if self.enemy.is_alive():
print("\n ___________.___ ________ ___ ___ ___________._.")
print(" \\_ _____/| | / _____/ / | \\ \\__ ___/| |")
print(" | __) | |/ \\ ___ / ~ \\ | | | |")
print(" | \\ | |\\ \\_\\ \\\\ Y / | | \\|")
print(" \\___ / |___| \\______ / \\___|_ / |____| __")
print(" \\/ \\/ \\/ \\/")
print(self.alive_text)
print("{} has {} HP".format(self.enemy.name, self.enemy.hp))
else:
print(" ____ ____.___ _________ ___________________ __________ _____.___.._.")
print(" \\ \\ / /| |\\_ ___ \\ \\__ ___/\\_____ \\ \\______ \\\\__ | || |")
print(" \\ Y / | |/ \\ \\/ | | / | \\ | _/ / | || |")
print(" \\ / | |\\ \\____ | | / | \\ | | \\ \\____ | \\|")
print(" \\___/ |___| \\______ / |____| \\_______ / |____|_ / / ______| __")
print(" \\/ \\/ \\/ \\/ \\/")
print(self.dead_text)
def modify_player(self, player):
if self.enemy.is_alive():
dex_mod = decimal.Decimal(player.dex_stat / 100)
dodge_chance = decimal.Decimal(random.random()) * dex_mod
miss_chance = decimal.Decimal(random.random()) * dex_mod
if miss_chance > 0.98:
print("The {} missed!".format(self.enemy.name))
elif dodge_chance > 0.98:
print("You dodged the attack!")
else:
def_mod = decimal.Decimal(2 - (player.def_stat / 100))
enemy_damage = round(self.enemy.damage * def_mod, 0)
player.curr_hp -= enemy_damage
print("The {} does {} damage. You have {} HP remaining."
.format(self.enemy.name, enemy_damage, player.curr_hp))
class TraderTile(MapTile):
def __init__(self, x, y):
self.trader = npc.Trader()
super().__init__(x, y)
def trade(self, buyer, seller):
if buyer.name == "Trader":
action = "sell"
buyer_char = "Trader"
else:
action = "buy"
buyer_char = "Player"
seller_inventory = []
item_choice = None
for item_cat, item_attr in seller.inventory['Items'].items():
if item_attr["qty"] > 0:
seller_inventory.append(item_attr["obj"])
print("\nTrading Items")
print("----------------\n")
if not seller_inventory:
print("There are no items to sell!")
else:
for i, item in enumerate(seller_inventory, 1):
print(" {}: {}".format(i, item.name))
print("\nq: Cancel trade")
while item_choice not in seller_inventory:
item_choice = input("\nWhich item do you want to {}? ".format(action))
if item_choice in ['Q', 'q']:
if buyer_char == "Player":
buyer.room.visited = 0
else:
seller.room.visited = 0
return
else:
try:
to_swap = seller_inventory[int(item_choice) - 1]
self.swap(seller, buyer, to_swap)
except (ValueError, IndexError):
print("Invalid choice!")
def swap(self, seller, buyer, item):
if buyer.name == "Trader":
action = "sell"
buyer_char = "Trader"
else:
action = "buy"
buyer_char = "Player"
if item.value > buyer.gold:
print("That's too expensive!")
self.trade(buyer, seller)
for item_cat, item_attr in seller.inventory['Items'].items():
if item_cat == item.name:
item_attr["qty"] -= 1
if item.name in buyer.inventory['Items'].items():
for item_cat, item_attr in buyer.inventory['Items'].items():
if item_cat == item.name:
item_attr["qty"] += 1
else:
buyer.inventory['Items'][item.name] = {}
buyer.inventory['Items'][item.name]['obj'] = item
buyer.inventory['Items'][item.name]['qty'] = 1
seller.gold = seller.gold + item.value
buyer.gold = buyer.gold - item.value
print("Trade complete!")
def check_if_trade(self, player):
while True:
if len(self.trader.inventory) == 0:
print("No items to trade!")
player.room.visited = 0
return
user_input = input("Would you like to (B)uy, (S)ell, or (Q)uit?: ")
if user_input in ['Q', 'q']:
player.room.visited = 0
return
elif user_input in ['B', 'b']:
| print("Here's whats available to buy:\n")
self.trade(buyer=player, seller=self.trader) | conditional_block | |
types.rs | chain account by the `X-REQUEST-SENDER-ADDRESS` header value.
/// * Could not find the compliance key of the onchain account found by the
/// `X-REQUEST-SENDER-ADDRESS` header value.
/// * The compliance key found from the onchain account by `X-REQUEST-SENDER-ADDRESS` is not a
/// valid ED25519 public key.
/// * `X-REQUEST-ID` is not a valid UUID format.
InvalidHttpHeader,
/// Missing HTTP header `X-REQUEST-ID` or `X-REQUEST-SENDER-ADDRESS`.
MissingHttpHeader,
//
// JWS Validation Error Codes#
//
/// Invalid JWS format (compact) or protected header
InvalidJws,
/// JWS signature verification failed
InvalidJwsSignature,
//
// Request Object Validation Error Codes#
//
/// Request content is not valid Json
InvalidJson,
/// Object is not valid, type does not match
/// The Command request/response object json is not an object, or the command object type does
/// not match command_type.
InvalidObject,
/// Either:
/// * Missing required field
/// * An optional field is required to be set for a specific state, e.g. PaymentObject requires
/// sender's kyc_data (which is an optional field for PaymentActorObject) when sender init
/// the PaymentObject.
MissingField,
/// A field is unknown for an object.
UnknownField,
/// Invalid/unsupported command_type.
UnknownCommandType,
/// * Invalid / unknown enum field values.
/// * UUID field value does not match UUID format.
/// * Payment actor address is not a valid DIP-5 account identifier.
/// * Currency field value is not a valid Diem currency code for the connected network.
InvalidFieldValue,
/// The HTTP request sender is not the right actor to send the payment object. For example, if
/// the actor receiver sends a new command with payment object change that should be done by
/// actor sender.
InvalidCommandProducer,
/// could not find command by reference_id for a non-initial state command object; for example,
/// actor receiver received a payment command object that actor sender status is
/// `ready_for_settlement`, but receiver could not find any command object by the reference id.
InvalidInitialOrPriorNotFound,
/// PaymentActionObject#amount is under travel rule threshold, no kyc needed for the
/// transaction
NoKycNeeded,
/// Either:
/// * Field recipient_signature value is not hex-encoded bytes.
/// * Field recipient_signature value is an invalid signature.
InvalidRecipientSignature,
/// * The DIP-5 account identifier address in the command object is not HTTP request sender’s
/// address or receiver’s address. For payment object it is sender.address or
/// receiver.address.
/// * Could not find on-chain account by an DIP-5 account identifier address in command object
/// address.
UnknownAddress,
/// * Command object is in conflict with another different command object by cid, likely a cid
/// is reused for different command object.
/// * Failed to acquire lock for the command object by the reference_id.
Conflict,
/// Field payment.action.currency value is a valid Diem currency code, but it is not supported
/// or acceptable by the receiver VASP.
UnsupportedCurrency,
/// * Could not find data by the original_payment_reference_id if the sender set it.
/// * The status of the original payment object found by original_payment_reference_id is
/// aborted instead of ready_for_settlement.
InvalidOriginalPaymentReferenceId,
/// Overwrite a write-once/immutable field value
/// * Overwrite a field that can only be written once.
/// * Overwrite an immutable field (field can only be set in initial command object), e.g.
/// `original_payment_reference_id`).
/// * Overwrite opponent payment actor's fields.
InvalidOverwrite,
/// As we only allow one actor action at a time, and the next states for a given command object
/// state are limited to specific states. This error indicates the new payment object state is
/// not valid according to the current object state. For example: VASP A sends RSOFT to VASP B,
/// VASP B should send the next payment object with ABORT, or SSOFTSEND; VASP A should respond
/// to this error code if VASP B sends payment object state SSOFT.
InvalidTransition,
#[serde(other)]
/// Unknown Error Code
Unknown,
}
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
pub struct OffChainError {
#[serde(rename = "type")]
error_type: OffChainErrorType,
#[serde(skip_serializing_if = "Option::is_none")]
field: Option<String>,
code: ErrorCode,
#[serde(skip_serializing_if = "Option::is_none")]
message: Option<String>,
}
#[derive(Deserialize, Serialize)]
#[serde(tag = "command_type", content = "command")]
pub enum Command {
PaymentCommand(PaymentCommandObject),
FundPullPreApprovalCommand,
}
#[derive(Deserialize, Serialize)]
pub struct PaymentCommandObject {
#[serde(deserialize_with = "ObjectType::deserialize_payment")]
#[serde(rename = "_ObjectType")]
object_type: ObjectType,
payment: PaymentObject,
}
impl PaymentCommandObject {
pub fn new(paym | : PaymentObject) -> Self {
Self {
object_type: ObjectType::PaymentCommand,
payment,
}
}
pub fn payment(&self) -> &PaymentObject {
&self.payment
}
pub fn into_payment(self) -> PaymentObject {
self.payment
}
}
/// A `PaymentActorObject` represents a participant in a payment - either sender or receiver. It
/// also includes the status of the actor, indicates missing information or willingness to settle
/// or abort the payment, and the Know-Your-Customer information of the customer involved in the
/// payment.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct PaymentActorObject {
/// Address of the sender/receiver account. Addresses may be single use or valid for a limited
/// time, and therefore VASPs should not rely on them remaining stable across time or different
/// VASP addresses. The addresses are encoded using bech32. The bech32 address encodes both the
/// address of the VASP as well as the specific user's subaddress. They should be no longer
/// than 80 characters. Mandatory and immutable. For Diem addresses, refer to the "account
/// identifier" section in DIP-5 for format.
pub address: Box<str>,
/// The KYC data for this account. This field is optional but immutable once it is set.
pub kyc_data: Option<KycDataObject>,
/// Status of the payment from the perspective of this actor. This field can only be set by the
/// respective sender/receiver VASP and represents the status on the sender/receiver VASP side.
/// This field is mandatory by this respective actor (either sender or receiver side) and
/// mutable. Note that in the first request (which is initiated by the sender), the receiver
/// status should be set to `None`.
pub status: StatusObject,
/// Can be specified by the respective VASP to hold metadata that the sender/receiver VASP
/// wishes to associate with this payment. It may be set to an empty list (i.e. `[]`). New
/// `metadata` elements may be appended to the `metadata` list via subsequent commands on an
/// object.
#[serde(skip_serializing_if = "Vec::is_empty", default)]
pub metadata: Vec<String>,
/// Freeform KYC data. If a soft-match occurs, this field can be used to specify additional KYC
/// data which can be used to clear the soft-match. It is suggested that this data be JSON,
/// XML, or another human-readable form.
pub additional_kyc_data: Option<String>,
}
impl PaymentActorObject {
pub fn status(&self) -> &StatusObject {
&self.status
}
pub fn kyc_data(&self) -> Option<&KycDataObject> {
self.kyc_data.as_ref()
}
pub fn additional_kyc_data(&self) -> Option<&str> {
self.additional_kyc_data.as_deref()
}
pub fn validate_write_once_fields(&self, prior: &Self) -> Result<(), WriteOnceError> {
if self.address != prior.address {
return Err(WriteOnceError);
}
if prior.kyc_data.is_some() && prior.kyc_data != self.kyc_data {
return Err(WriteOnceError);
}
if !self.metadata.starts_with(&prior.metadata) {
return Err(WriteOnceError);
}
Ok(())
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum ActionType {
Charge,
}
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct PaymentActionObject {
/// Amount of the transfer. Base units are the same as for on-chain transactions for this
/// currency. For example, if DiemUSD is represented on-chain where “1” equals 1e-6 dollars,
/// | ent | identifier_name |
types.rs |
}
}
#[derive(Deserialize, Serialize)]
pub struct CommandRequestObject {
#[serde(deserialize_with = "ObjectType::deserialize_request")]
#[serde(rename = "_ObjectType")]
object_type: ObjectType,
#[serde(flatten)]
command: Command,
cid: Uuid,
}
impl CommandRequestObject {
pub fn new(command: Command, cid: Uuid) -> Self {
Self {
object_type: ObjectType::CommandRequestObject,
command,
cid,
}
}
pub fn command(&self) -> &Command {
&self.command
}
pub fn cid(&self) -> Uuid {
self.cid
}
pub fn into_parts(self) -> (Command, Uuid) {
(self.command, self.cid)
}
}
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum CommandStatus {
Success,
Failure,
}
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
pub struct CommandResponseObject {
#[serde(deserialize_with = "ObjectType::deserialize_response")]
#[serde(rename = "_ObjectType")]
object_type: ObjectType,
status: CommandStatus,
#[serde(skip_serializing_if = "Option::is_none")]
error: Option<OffChainError>,
#[serde(skip_serializing_if = "Option::is_none")]
cid: Option<Uuid>,
}
impl CommandResponseObject {
pub fn new(status: CommandStatus) -> Self {
Self {
object_type: ObjectType::CommandResponseObject,
status,
error: None,
cid: None,
}
}
}
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
pub enum OffChainErrorType {
#[serde(rename = "command_error")]
Command,
#[serde(rename = "protocol_error")]
Protocol,
}
// https://dip.diem.com/dip-1/#list-of-error-codes
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum ErrorCode {
//
// HTTP Header Validation Error Codes
//
/// One of the following potential errors:
/// * `X-REQUEST-SENDER-ADDRESS` header value is not the request sender’s address in the
/// command object. All command objects should have a field that is the request sender’s
/// address.
/// * Could not find Diem's onchain account by the `X-REQUEST-SENDER-ADDRESS` header value.
/// * Could not find the compliance key of the onchain account found by the
/// `X-REQUEST-SENDER-ADDRESS` header value.
/// * The compliance key found from the onchain account by `X-REQUEST-SENDER-ADDRESS` is not a
/// valid ED25519 public key.
/// * `X-REQUEST-ID` is not a valid UUID format.
InvalidHttpHeader,
/// Missing HTTP header `X-REQUEST-ID` or `X-REQUEST-SENDER-ADDRESS`.
MissingHttpHeader,
//
// JWS Validation Error Codes#
//
/// Invalid JWS format (compact) or protected header
InvalidJws,
/// JWS signature verification failed
InvalidJwsSignature,
//
// Request Object Validation Error Codes#
//
/// Request content is not valid Json
InvalidJson,
/// Object is not valid, type does not match
/// The Command request/response object json is not an object, or the command object type does
/// not match command_type.
InvalidObject,
/// Either:
/// * Missing required field
/// * An optional field is required to be set for a specific state, e.g. PaymentObject requires
/// sender's kyc_data (which is an optional field for PaymentActorObject) when sender init
/// the PaymentObject.
MissingField,
/// A field is unknown for an object.
UnknownField,
/// Invalid/unsupported command_type.
UnknownCommandType,
/// * Invalid / unknown enum field values.
/// * UUID field value does not match UUID format.
/// * Payment actor address is not a valid DIP-5 account identifier.
/// * Currency field value is not a valid Diem currency code for the connected network.
InvalidFieldValue,
/// The HTTP request sender is not the right actor to send the payment object. For example, if
/// the actor receiver sends a new command with payment object change that should be done by
/// actor sender.
InvalidCommandProducer,
/// could not find command by reference_id for a non-initial state command object; for example,
/// actor receiver received a payment command object that actor sender status is
/// `ready_for_settlement`, but receiver could not find any command object by the reference id.
InvalidInitialOrPriorNotFound,
/// PaymentActionObject#amount is under travel rule threshold, no kyc needed for the
/// transaction
NoKycNeeded,
/// Either:
/// * Field recipient_signature value is not hex-encoded bytes.
/// * Field recipient_signature value is an invalid signature.
InvalidRecipientSignature,
/// * The DIP-5 account identifier address in the command object is not HTTP request sender’s
/// address or receiver’s address. For payment object it is sender.address or
/// receiver.address.
/// * Could not find on-chain account by an DIP-5 account identifier address in command object
/// address.
UnknownAddress,
/// * Command object is in conflict with another different command object by cid, likely a cid
/// is reused for different command object.
/// * Failed to acquire lock for the command object by the reference_id.
Conflict,
/// Field payment.action.currency value is a valid Diem currency code, but it is not supported
/// or acceptable by the receiver VASP.
UnsupportedCurrency,
/// * Could not find data by the original_payment_reference_id if the sender set it.
/// * The status of the original payment object found by original_payment_reference_id is
/// aborted instead of ready_for_settlement.
InvalidOriginalPaymentReferenceId,
/// Overwrite a write-once/immutable field value
/// * Overwrite a field that can only be written once.
/// * Overwrite an immutable field (field can only be set in initial command object), e.g.
/// `original_payment_reference_id`).
/// * Overwrite opponent payment actor's fields.
InvalidOverwrite,
/// As we only allow one actor action at a time, and the next states for a given command object
/// state are limited to specific states. This error indicates the new payment object state is
/// not valid according to the current object state. For example: VASP A sends RSOFT to VASP B,
/// VASP B should send the next payment object with ABORT, or SSOFTSEND; VASP A should respond
/// to this error code if VASP B sends payment object state SSOFT.
InvalidTransition,
#[serde(other)]
/// Unknown Error Code
Unknown,
}
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
pub struct OffChainError {
#[serde(rename = "type")]
error_type: OffChainErrorType,
#[serde(skip_serializing_if = "Option::is_none")]
field: Option<String>,
code: ErrorCode,
#[serde(skip_serializing_if = "Option::is_none")]
message: Option<String>,
}
#[derive(Deserialize, Serialize)]
#[serde(tag = "command_type", content = "command")]
pub enum Command {
PaymentCommand(PaymentCommandObject),
FundPullPreApprovalCommand,
}
#[derive(Deserialize, Serialize)]
pub struct PaymentCommandObject {
#[serde(deserialize_with = "ObjectType::deserialize_payment")]
#[serde(rename = "_ObjectType")]
object_type: ObjectType,
payment: PaymentObject,
}
impl PaymentCommandObject {
pub fn new(payment: PaymentObject) -> Self {
Self {
object_type: ObjectType::PaymentCommand,
payment,
}
}
pub fn payment(&self) -> &PaymentObject {
&self.payment
}
pub fn into_payment(self) -> PaymentObject {
self.payment
}
}
/// A `PaymentActorObject` represents a participant in a payment - either sender or receiver. It
/// also includes the status of the actor, indicates missing information or willingness to settle
/// or abort the payment, and the Know-Your-Customer information of the customer involved in the
/// payment.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct PaymentActorObject {
/// Address of the sender/receiver account. Addresses may be single use or valid for a limited
/// time, and therefore VASPs should not rely on them remaining stable across time or different
/// VASP addresses. The addresses are encoded using bech32. The bech32 address encodes both the
/// address of the VASP as well as the specific user's subaddress. They should be no longer
/// than 80 characters. Mandatory and immutable. For Diem addresses, refer to the "account
/// identifier" section in DIP-5 for format.
pub address: Box<str>,
/// The KYC data for this account. This field is optional but immutable once it is set.
pub kyc_data: Option<KycDataObject>,
/// Status of the payment from the perspective of this actor. This field can only | {
Err(D::Error::custom(format_args!("expected {:?}", variant)))
} | conditional_block | |
types.rs | chain account by the `X-REQUEST-SENDER-ADDRESS` header value.
/// * Could not find the compliance key of the onchain account found by the
/// `X-REQUEST-SENDER-ADDRESS` header value.
/// * The compliance key found from the onchain account by `X-REQUEST-SENDER-ADDRESS` is not a
/// valid ED25519 public key.
/// * `X-REQUEST-ID` is not a valid UUID format.
InvalidHttpHeader,
/// Missing HTTP header `X-REQUEST-ID` or `X-REQUEST-SENDER-ADDRESS`.
MissingHttpHeader,
//
// JWS Validation Error Codes#
//
/// Invalid JWS format (compact) or protected header
InvalidJws,
/// JWS signature verification failed
InvalidJwsSignature,
//
// Request Object Validation Error Codes#
//
/// Request content is not valid Json
InvalidJson,
/// Object is not valid, type does not match
/// The Command request/response object json is not an object, or the command object type does
/// not match command_type.
InvalidObject,
/// Either:
/// * Missing required field
/// * An optional field is required to be set for a specific state, e.g. PaymentObject requires
/// sender's kyc_data (which is an optional field for PaymentActorObject) when sender init
/// the PaymentObject.
MissingField,
/// A field is unknown for an object.
UnknownField,
/// Invalid/unsupported command_type.
UnknownCommandType,
/// * Invalid / unknown enum field values.
/// * UUID field value does not match UUID format.
/// * Payment actor address is not a valid DIP-5 account identifier.
/// * Currency field value is not a valid Diem currency code for the connected network.
InvalidFieldValue,
/// The HTTP request sender is not the right actor to send the payment object. For example, if
/// the actor receiver sends a new command with payment object change that should be done by
/// actor sender.
InvalidCommandProducer,
/// could not find command by reference_id for a non-initial state command object; for example,
/// actor receiver received a payment command object that actor sender status is
/// `ready_for_settlement`, but receiver could not find any command object by the reference id.
InvalidInitialOrPriorNotFound,
/// PaymentActionObject#amount is under travel rule threshold, no kyc needed for the
/// transaction
NoKycNeeded,
/// Either:
/// * Field recipient_signature value is not hex-encoded bytes.
/// * Field recipient_signature value is an invalid signature.
InvalidRecipientSignature,
/// * The DIP-5 account identifier address in the command object is not HTTP request sender’s
/// address or receiver’s address. For payment object it is sender.address or
/// receiver.address.
/// * Could not find on-chain account by an DIP-5 account identifier address in command object
/// address.
UnknownAddress,
/// * Command object is in conflict with another different command object by cid, likely a cid
/// is reused for different command object.
/// * Failed to acquire lock for the command object by the reference_id.
Conflict,
/// Field payment.action.currency value is a valid Diem currency code, but it is not supported
/// or acceptable by the receiver VASP.
UnsupportedCurrency,
/// * Could not find data by the original_payment_reference_id if the sender set it.
/// * The status of the original payment object found by original_payment_reference_id is
/// aborted instead of ready_for_settlement.
InvalidOriginalPaymentReferenceId,
/// Overwrite a write-once/immutable field value
/// * Overwrite a field that can only be written once.
/// * Overwrite an immutable field (field can only be set in initial command object), e.g.
/// `original_payment_reference_id`).
/// * Overwrite opponent payment actor's fields.
InvalidOverwrite,
/// As we only allow one actor action at a time, and the next states for a given command object
/// state are limited to specific states. This error indicates the new payment object state is
/// not valid according to the current object state. For example: VASP A sends RSOFT to VASP B,
/// VASP B should send the next payment object with ABORT, or SSOFTSEND; VASP A should respond
/// to this error code if VASP B sends payment object state SSOFT.
InvalidTransition,
#[serde(other)]
/// Unknown Error Code
Unknown,
}
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
pub struct OffChainError {
#[serde(rename = "type")]
error_type: OffChainErrorType,
#[serde(skip_serializing_if = "Option::is_none")]
field: Option<String>,
code: ErrorCode,
#[serde(skip_serializing_if = "Option::is_none")]
message: Option<String>,
}
#[derive(Deserialize, Serialize)]
#[serde(tag = "command_type", content = "command")]
pub enum Command {
PaymentCommand(PaymentCommandObject),
FundPullPreApprovalCommand,
}
#[derive(Deserialize, Serialize)]
pub struct PaymentCommandObject {
#[serde(deserialize_with = "ObjectType::deserialize_payment")]
#[serde(rename = "_ObjectType")]
object_type: ObjectType,
payment: PaymentObject,
}
impl PaymentCommandObject {
pub fn new(payment: PaymentObject) -> Self {
Self {
object_type: ObjectType::PaymentCommand,
payment,
}
}
pub fn payment(&self) -> &PaymentObject {
| b fn into_payment(self) -> PaymentObject {
self.payment
}
}
/// A `PaymentActorObject` represents a participant in a payment - either sender or receiver. It
/// also includes the status of the actor, indicates missing information or willingness to settle
/// or abort the payment, and the Know-Your-Customer information of the customer involved in the
/// payment.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct PaymentActorObject {
/// Address of the sender/receiver account. Addresses may be single use or valid for a limited
/// time, and therefore VASPs should not rely on them remaining stable across time or different
/// VASP addresses. The addresses are encoded using bech32. The bech32 address encodes both the
/// address of the VASP as well as the specific user's subaddress. They should be no longer
/// than 80 characters. Mandatory and immutable. For Diem addresses, refer to the "account
/// identifier" section in DIP-5 for format.
pub address: Box<str>,
/// The KYC data for this account. This field is optional but immutable once it is set.
pub kyc_data: Option<KycDataObject>,
/// Status of the payment from the perspective of this actor. This field can only be set by the
/// respective sender/receiver VASP and represents the status on the sender/receiver VASP side.
/// This field is mandatory by this respective actor (either sender or receiver side) and
/// mutable. Note that in the first request (which is initiated by the sender), the receiver
/// status should be set to `None`.
pub status: StatusObject,
/// Can be specified by the respective VASP to hold metadata that the sender/receiver VASP
/// wishes to associate with this payment. It may be set to an empty list (i.e. `[]`). New
/// `metadata` elements may be appended to the `metadata` list via subsequent commands on an
/// object.
#[serde(skip_serializing_if = "Vec::is_empty", default)]
pub metadata: Vec<String>,
/// Freeform KYC data. If a soft-match occurs, this field can be used to specify additional KYC
/// data which can be used to clear the soft-match. It is suggested that this data be JSON,
/// XML, or another human-readable form.
pub additional_kyc_data: Option<String>,
}
impl PaymentActorObject {
pub fn status(&self) -> &StatusObject {
&self.status
}
pub fn kyc_data(&self) -> Option<&KycDataObject> {
self.kyc_data.as_ref()
}
pub fn additional_kyc_data(&self) -> Option<&str> {
self.additional_kyc_data.as_deref()
}
pub fn validate_write_once_fields(&self, prior: &Self) -> Result<(), WriteOnceError> {
if self.address != prior.address {
return Err(WriteOnceError);
}
if prior.kyc_data.is_some() && prior.kyc_data != self.kyc_data {
return Err(WriteOnceError);
}
if !self.metadata.starts_with(&prior.metadata) {
return Err(WriteOnceError);
}
Ok(())
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum ActionType {
Charge,
}
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct PaymentActionObject {
/// Amount of the transfer. Base units are the same as for on-chain transactions for this
/// currency. For example, if DiemUSD is represented on-chain where “1” equals 1e-6 dollars,
/// then | &self.payment
}
pu | identifier_body |
types.rs | payment - either sender or receiver. It
/// also includes the status of the actor, indicates missing information or willingness to settle
/// or abort the payment, and the Know-Your-Customer information of the customer involved in the
/// payment.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct PaymentActorObject {
/// Address of the sender/receiver account. Addresses may be single use or valid for a limited
/// time, and therefore VASPs should not rely on them remaining stable across time or different
/// VASP addresses. The addresses are encoded using bech32. The bech32 address encodes both the
/// address of the VASP as well as the specific user's subaddress. They should be no longer
/// than 80 characters. Mandatory and immutable. For Diem addresses, refer to the "account
/// identifier" section in DIP-5 for format.
pub address: Box<str>,
/// The KYC data for this account. This field is optional but immutable once it is set.
pub kyc_data: Option<KycDataObject>,
/// Status of the payment from the perspective of this actor. This field can only be set by the
/// respective sender/receiver VASP and represents the status on the sender/receiver VASP side.
/// This field is mandatory by this respective actor (either sender or receiver side) and
/// mutable. Note that in the first request (which is initiated by the sender), the receiver
/// status should be set to `None`.
pub status: StatusObject,
/// Can be specified by the respective VASP to hold metadata that the sender/receiver VASP
/// wishes to associate with this payment. It may be set to an empty list (i.e. `[]`). New
/// `metadata` elements may be appended to the `metadata` list via subsequent commands on an
/// object.
#[serde(skip_serializing_if = "Vec::is_empty", default)]
pub metadata: Vec<String>,
/// Freeform KYC data. If a soft-match occurs, this field can be used to specify additional KYC
/// data which can be used to clear the soft-match. It is suggested that this data be JSON,
/// XML, or another human-readable form.
pub additional_kyc_data: Option<String>,
}
impl PaymentActorObject {
pub fn status(&self) -> &StatusObject {
&self.status
}
pub fn kyc_data(&self) -> Option<&KycDataObject> {
self.kyc_data.as_ref()
}
pub fn additional_kyc_data(&self) -> Option<&str> {
self.additional_kyc_data.as_deref()
}
pub fn validate_write_once_fields(&self, prior: &Self) -> Result<(), WriteOnceError> {
if self.address != prior.address {
return Err(WriteOnceError);
}
if prior.kyc_data.is_some() && prior.kyc_data != self.kyc_data {
return Err(WriteOnceError);
}
if !self.metadata.starts_with(&prior.metadata) {
return Err(WriteOnceError);
}
Ok(())
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum ActionType {
Charge,
}
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct PaymentActionObject {
/// Amount of the transfer. Base units are the same as for on-chain transactions for this
/// currency. For example, if DiemUSD is represented on-chain where “1” equals 1e-6 dollars,
/// then “1” equals the same amount here. For any currency, the on-chain mapping must be used
/// for amounts.
pub amount: u64,
/// One of the supported on-chain currency types - ex. XUS, etc.
// TODO Should be an enum per https://dip.diem.com/dip-1/#paymentactionobject
pub currency: String,
/// Populated in the request. This value indicates the requested action to perform, and the
/// only valid value is charge.
pub action: ActionType,
/// [Unix time](https://en.wikipedia.org/wiki/Unix_time) indicating the time that the payment
/// Command was created.
pub timestamp: u64,
}
/// Some fields are immutable after they are defined once. Others can be updated multiple times
/// (see below). Updating immutable fields with a different value results in a Command error.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct PaymentObject {
/// Information about the sender in this payment
pub sender: PaymentActorObject,
/// Information about the receiver in this payment
pub receiver: PaymentActorObject,
/// Unique reference ID of this payment on the payment initiator VASP (the VASP which
/// originally created this payment Object). This value should be globally unique. This field
/// is mandatory on payment creation and immutable after that. We recommend using a 128 bits
/// long UUID according to RFC4122 with "-"'s included.
pub reference_id: Uuid,
/// Used to refer an old payment known to the other VASP. For example, used for refunds. The
/// reference ID of the original payment will be placed into this field. This field is
/// mandatory on refund and immutable
pub originial_payment_reference_id: Option<Uuid>,
/// Signature of the recipient of this transaction encoded in hex. The is signed with the
/// compliance key of the recipient VASP and is used for on-chain attestation from the
/// recipient party. This may be omitted on blockchains which do not require on-chain
/// attestation.
pub recipient_signature: Option<String>,
/// Number of cryptocurrency + currency type (XUS, etc.)1 + type of action to take. This field is mandatory and immutable
pub action: PaymentActionObject,
/// Description of the payment. To be displayed to the user. Unicode utf-8 encoded max length
/// of 255 characters. This field is optional but can only be written once.
pub description: Option<String>,
}
impl PaymentObject {
pub fn sender(&self) -> &PaymentActorObject {
&self.sender
}
pub fn receiver(&self) -> &PaymentActorObject {
&self.receiver
}
pub fn reference_id(&self) -> Uuid {
self.reference_id
}
pub fn actor_object_by_actor(&self, actor: Actor) -> &PaymentActorObject {
match actor {
Actor::Sender => self.sender(),
Actor::Receiver => self.receiver(),
}
}
pub fn recipient_signature(&self) -> Option<&str> {
self.recipient_signature.as_deref()
}
pub fn validate_write_once_fields(&self, prior: &Self) -> Result<(), WriteOnceError> {
self.sender.validate_write_once_fields(&prior.sender)?;
self.receiver.validate_write_once_fields(&prior.receiver)?;
if self.reference_id != prior.reference_id {
return Err(WriteOnceError);
}
if self.originial_payment_reference_id != prior.originial_payment_reference_id {
return Err(WriteOnceError);
}
if self.action != prior.action {
return Err(WriteOnceError);
}
if prior.description.is_some() && prior.description != self.description {
return Err(WriteOnceError);
}
Ok(())
}
}
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct StatusObject {
/// Status of the payment from the perspective of this actor. This field can only be set by the
/// respective sender/receiver VASP and represents the status on the sender/receiver VASP side.
/// This field is mandatory by this respective actor (either sender or receiver side) and
/// mutable.
pub status: Status,
/// In the case of an `abort` status, this field may be used to describe the reason for the
/// abort. Represents the error code of the corresponding error.
pub abort_code: Option<AbortCode>,
/// Additional details about this error. To be used only when `abort_code` is populated.
pub abort_message: Option<String>,
}
impl StatusObject {
pub fn status(&self) -> Status {
self.status
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum Status {
/// No status is yet set from this actor.
None,
/// KYC data about the subaddresses is required by this actor.
NeedsKycData,
/// Transaction is ready for settlement according to this actor (i.e. the requried
/// signatures/KYC data has been provided.
ReadyForSettlement,
/// Indicates the actor wishes to abort this payment, instaed of settling it.
Abort,
/// Actor's KYC data resulted in a soft-match, request additional KYC data.
SoftMatch,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum AbortCode {
/// The payment is rejected. It should not be used in the `original_payment_reference_id` field
/// of a new payment
Rejected,
}
/// Represents a national ID.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct NationalIdObject {
/// Indicates the national ID value - for example, a social security number
pub id_value: String, | random_line_split | ||
PhaseOne.js | position: "absolute",
top: 0,
left: 0,
bottom: 0,
right: 0,
display: "flex",
justifyContent: "center",
alignItems: "center",
backgroundColor: "rgba(0,0,0,0.5)",
"& *": {
color: "white",
},
"&.front": {
backgroundColor: "lightblue",
"& *": {
color: "black",
},
}, | },
biddingStatus: {
border: "1px solid #aaa",
padding: theme.spacing(1),
backgroundColor: "#eee",
},
coinImage: {
width: "60px",
cursor: "pointer",
borderRadius: "100%",
"&.selected": {
border: "2px solid orange",
boxShadow: "0 0 10px orange",
},
},
coinStatusTable: {
borderTop: `1px solid #aaa`,
borderBottom: `1px solid #aaa`,
marginTop: theme.spacing(1),
"&>.MuiGrid-item:first-child, &>.MuiGrid-item:nth-child(2)": {
borderRight: `1px solid #aaa`,
},
"& p": {
fontWeight: "bold",
},
},
btnGroup: {
marginTop: theme.spacing(2),
textAlign: "center",
"&>button": {
fontWeight: "bold",
},
"&>button:first-child": {
marginRight: theme.spacing(4),
},
},
gameStateHeader: {
fontWeight: "bold",
fontSize: "0.8rem",
},
[theme.breakpoints.down("xs")]: {
cardImage: {
width: "70px",
},
coinStatusTable: {
"&>.MuiGrid-item:first-child, &>.MuiGrid-item:nth-child(2)": {
borderRight: "none",
borderBottom: `1px solid #aaa`,
},
},
},
}));
const PhaseOne = ({ socket, gameState, room }) => {
const classes = useStyles();
const auth = useSelector((state) => state.auth);
const [activePlayer, setActivePlayer] = useState(null);
const [myState, setMyState] = useState(null);
const [selectedCoins, setSelectedCoins] = useState([]);
const [selectedValues, setSelectedValues] = useState(0);
const [startAudio] = useState(new Audio(startSound));
const [coinAudio] = useState(new Audio(coinSound));
const [passAudio] = useState(new Audio(passSound));
useEffect(() => {
if (
gameState &&
activePlayer &&
gameState.players.find((player) => player.userId === activePlayer.userId)
.bidding != null
) {
coinAudio.currentTime = 0;
coinAudio.play();
}
if (
gameState &&
activePlayer &&
gameState.players.find((player) => player.userId === activePlayer.userId)
.bidding == null
) {
passAudio.currentTime = 0;
passAudio.play();
}
const active = gameState?.players.find((player) => player.isTurn);
const me = gameState?.players.find(
(player) => player.userId === auth.userInfo._id
);
setActivePlayer(active);
setMyState(me);
}, [gameState]);
useEffect(() => {
startAudio.play();
}, []);
const onCoinClick = (index, value) => {
if (selectedCoins.includes(index)) {
setSelectedCoins(selectedCoins.filter((idx) => idx !== index));
setSelectedValues(selectedValues - value);
} else {
setSelectedCoins([...selectedCoins, index]);
setSelectedValues(selectedValues + value);
}
};
const onBidClick = () => {
socket.emit("updateForSale", {
type: TYPES.BID,
payload: {
selectedCoinsIndex: selectedCoins,
},
room,
userId: myState.userId,
});
// unset selected coins from client-side view
setSelectedCoins([]);
setSelectedValues(0);
};
const onPassClick = () => {
socket.emit("updateForSale", {
type: TYPES.PASS,
room,
userId: myState.userId,
});
// unset selected coins from client-side view
setSelectedCoins([]);
setSelectedValues(0);
};
// Utility to display coin values with commas in every three digits
const numberWithCommas = (num) => {
return num.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ",");
};
// Utility to render client user's remaining coin values
const remainingCoins = () => {
return numberWithCommas(
myState.coins.reduce((acc, coin) => acc + coin.value, 0)
);
};
// Utility to find minimum bid available for current round
const minimumBid = () => {
const bids = gameState.players.map((player) => player.bidding);
return Math.max(...bids) + 1000;
};
return (
<>
{gameState && myState && (
<div
className={`${classes.root} ${myState.isTurn ? "" : classes.notTurn}`}
>
<Alert severity="info" variant="standard">
{activePlayer ? (
<AlertTitle>
Player{" "}
<span className={classes.activePlayerName}>
{activePlayer.username}
</span>{" "}
is making a decision...
</AlertTitle>
) : (
<AlertTitle> New round is about to begin...</AlertTitle>
)}
</Alert>
<div className={classes.boardWrapper}>
<Grid
container
className={classes.propertyRow}
justify="center"
alignItems="flex-start"
spacing={2}
>
<Grid item>
<Card className={classes.card}>
<CardMedia
src="https://i.pinimg.com/236x/b9/70/33/b97033a8708d2cbaf7d1990020a89a54--playing-cards-deck.jpg"
component="img"
className={classes.cardImage}
/>
<div className={classes.cardOverlay}>
<Typography variant="h5">
{gameState.remainingProperties}
</Typography>
</div>
</Card>
</Grid>
<Grid container item xs spacing={1} justify="flex-start">
{gameState.openProperties.map((propertyCard) => {
const renderCard = () => (
<Card className={classes.card}>
<CardMedia
src={propertyCard.image_url}
component="img"
className={classes.cardImage}
/>
<div className={`${classes.cardOverlay} front`}>
<Typography variant="h5">
{propertyCard.value}
</Typography>
</div>
</Card>
);
const taken = propertyCard.taken;
return (
<Grid item key={propertyCard.value}>
{taken ? (
<Badge
badgeContent={taken}
color="primary"
anchorOrigin={{
vertical: "bottom",
horizontal: "left",
}}
>
{renderCard()}
</Badge>
) : (
renderCard()
)}
</Grid>
);
})}
</Grid>
</Grid>
<Divider style={{ marginTop: "12px", marginBottom: "12px" }} />
<Typography variant="overline" className={classes.gameStateHeader}>
Bidding Status
</Typography>
<Grid container className={classes.biddingRow} spacing={1}>
{gameState.players.map((player) => {
return (
<Grid item xs={12} sm={6} key={player.userId}>
<Typography
variant="h6"
className={`${
activePlayer && player.userId === activePlayer.userId
? classes.activePlayerName
: ""
} ${classes.biddingStatus}`}
>
{player.username}:{" "}
{player.bidding || player.bidding === 0
? `$ ${numberWithCommas(player.bidding)}`
: "PASS"}
</Typography>
</Grid>
);
})}
</Grid>
<Divider style={{ marginTop: "12px", marginBottom: "12px" }} />
<Grid container spacing={1} justify="flex-start">
{myState.coins.map((coin, index) => {
return (
<Grid item key={index}>
<img
title={coin.value}
src={coin.image_url}
alt="coin"
className={`${classes.coinImage} ${
selectedCoins.includes(index) && "selected"
}`}
onClick={() => onCoinClick(index, coin.value | },
activePlayerName: {
color: theme.palette.error.dark,
fontWeight: "bold", | random_line_split |
PhaseOne.js | position: "absolute",
top: 0,
left: 0,
bottom: 0,
right: 0,
display: "flex",
justifyContent: "center",
alignItems: "center",
backgroundColor: "rgba(0,0,0,0.5)",
"& *": {
color: "white",
},
"&.front": {
backgroundColor: "lightblue",
"& *": {
color: "black",
},
},
},
activePlayerName: {
color: theme.palette.error.dark,
fontWeight: "bold",
},
biddingStatus: {
border: "1px solid #aaa",
padding: theme.spacing(1),
backgroundColor: "#eee",
},
coinImage: {
width: "60px",
cursor: "pointer",
borderRadius: "100%",
"&.selected": {
border: "2px solid orange",
boxShadow: "0 0 10px orange",
},
},
coinStatusTable: {
borderTop: `1px solid #aaa`,
borderBottom: `1px solid #aaa`,
marginTop: theme.spacing(1),
"&>.MuiGrid-item:first-child, &>.MuiGrid-item:nth-child(2)": {
borderRight: `1px solid #aaa`,
},
"& p": {
fontWeight: "bold",
},
},
btnGroup: {
marginTop: theme.spacing(2),
textAlign: "center",
"&>button": {
fontWeight: "bold",
},
"&>button:first-child": {
marginRight: theme.spacing(4),
},
},
gameStateHeader: {
fontWeight: "bold",
fontSize: "0.8rem",
},
[theme.breakpoints.down("xs")]: {
cardImage: {
width: "70px",
},
coinStatusTable: {
"&>.MuiGrid-item:first-child, &>.MuiGrid-item:nth-child(2)": {
borderRight: "none",
borderBottom: `1px solid #aaa`,
},
},
},
}));
const PhaseOne = ({ socket, gameState, room }) => {
const classes = useStyles();
const auth = useSelector((state) => state.auth);
const [activePlayer, setActivePlayer] = useState(null);
const [myState, setMyState] = useState(null);
const [selectedCoins, setSelectedCoins] = useState([]);
const [selectedValues, setSelectedValues] = useState(0);
const [startAudio] = useState(new Audio(startSound));
const [coinAudio] = useState(new Audio(coinSound));
const [passAudio] = useState(new Audio(passSound));
useEffect(() => {
if (
gameState &&
activePlayer &&
gameState.players.find((player) => player.userId === activePlayer.userId)
.bidding != null
) |
if (
gameState &&
activePlayer &&
gameState.players.find((player) => player.userId === activePlayer.userId)
.bidding == null
) {
passAudio.currentTime = 0;
passAudio.play();
}
const active = gameState?.players.find((player) => player.isTurn);
const me = gameState?.players.find(
(player) => player.userId === auth.userInfo._id
);
setActivePlayer(active);
setMyState(me);
}, [gameState]);
useEffect(() => {
startAudio.play();
}, []);
const onCoinClick = (index, value) => {
if (selectedCoins.includes(index)) {
setSelectedCoins(selectedCoins.filter((idx) => idx !== index));
setSelectedValues(selectedValues - value);
} else {
setSelectedCoins([...selectedCoins, index]);
setSelectedValues(selectedValues + value);
}
};
const onBidClick = () => {
socket.emit("updateForSale", {
type: TYPES.BID,
payload: {
selectedCoinsIndex: selectedCoins,
},
room,
userId: myState.userId,
});
// unset selected coins from client-side view
setSelectedCoins([]);
setSelectedValues(0);
};
const onPassClick = () => {
socket.emit("updateForSale", {
type: TYPES.PASS,
room,
userId: myState.userId,
});
// unset selected coins from client-side view
setSelectedCoins([]);
setSelectedValues(0);
};
// Utility to display coin values with commas in every three digits
const numberWithCommas = (num) => {
return num.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ",");
};
// Utility to render client user's remaining coin values
const remainingCoins = () => {
return numberWithCommas(
myState.coins.reduce((acc, coin) => acc + coin.value, 0)
);
};
// Utility to find minimum bid available for current round
const minimumBid = () => {
const bids = gameState.players.map((player) => player.bidding);
return Math.max(...bids) + 1000;
};
return (
<>
{gameState && myState && (
<div
className={`${classes.root} ${myState.isTurn ? "" : classes.notTurn}`}
>
<Alert severity="info" variant="standard">
{activePlayer ? (
<AlertTitle>
Player{" "}
<span className={classes.activePlayerName}>
{activePlayer.username}
</span>{" "}
is making a decision...
</AlertTitle>
) : (
<AlertTitle> New round is about to begin...</AlertTitle>
)}
</Alert>
<div className={classes.boardWrapper}>
<Grid
container
className={classes.propertyRow}
justify="center"
alignItems="flex-start"
spacing={2}
>
<Grid item>
<Card className={classes.card}>
<CardMedia
src="https://i.pinimg.com/236x/b9/70/33/b97033a8708d2cbaf7d1990020a89a54--playing-cards-deck.jpg"
component="img"
className={classes.cardImage}
/>
<div className={classes.cardOverlay}>
<Typography variant="h5">
{gameState.remainingProperties}
</Typography>
</div>
</Card>
</Grid>
<Grid container item xs spacing={1} justify="flex-start">
{gameState.openProperties.map((propertyCard) => {
const renderCard = () => (
<Card className={classes.card}>
<CardMedia
src={propertyCard.image_url}
component="img"
className={classes.cardImage}
/>
<div className={`${classes.cardOverlay} front`}>
<Typography variant="h5">
{propertyCard.value}
</Typography>
</div>
</Card>
);
const taken = propertyCard.taken;
return (
<Grid item key={propertyCard.value}>
{taken ? (
<Badge
badgeContent={taken}
color="primary"
anchorOrigin={{
vertical: "bottom",
horizontal: "left",
}}
>
{renderCard()}
</Badge>
) : (
renderCard()
)}
</Grid>
);
})}
</Grid>
</Grid>
<Divider style={{ marginTop: "12px", marginBottom: "12px" }} />
<Typography variant="overline" className={classes.gameStateHeader}>
Bidding Status
</Typography>
<Grid container className={classes.biddingRow} spacing={1}>
{gameState.players.map((player) => {
return (
<Grid item xs={12} sm={6} key={player.userId}>
<Typography
variant="h6"
className={`${
activePlayer && player.userId === activePlayer.userId
? classes.activePlayerName
: ""
} ${classes.biddingStatus}`}
>
{player.username}:{" "}
{player.bidding || player.bidding === 0
? `$ ${numberWithCommas(player.bidding)}`
: "PASS"}
</Typography>
</Grid>
);
})}
</Grid>
<Divider style={{ marginTop: "12px", marginBottom: "12px" }} />
<Grid container spacing={1} justify="flex-start">
{myState.coins.map((coin, index) => {
return (
<Grid item key={index}>
<img
title={coin.value}
src={coin.image_url}
alt="coin"
className={`${classes.coinImage} ${
selectedCoins.includes(index) && "selected"
}`}
onClick={() => onCoinClick(index, | {
coinAudio.currentTime = 0;
coinAudio.play();
} | conditional_block |
main.go | .*?)"`)
func reformatObjectId(objectId string) string {
fmt.Println("objectID passed in: ", objectId)
var idStringBeginning = "ObjectId("
var idStringEnd = ")"
id := ObjectIdRegEx.FindString(objectId)
if id == "" {
fmt.Println("Error in reformatObjectId")
return ""
}
return idStringBeginning + id + idStringEnd
}
func randomMessage(s *Server) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Println("Begin randomMessage")
// Construct aggregation "pipeline" to return 1 random document from entire collection
pipeline := []bson.D{bson.D{{"$sample", bson.D{{"size", 1}}}}}
cursor, _ := s.col.Aggregate(context.Background(), pipeline)
var result Message
for cursor.Next(context.Background()) {
cursorErr := cursor.Decode(&result)
if cursorErr != nil {
log.Fatal("Error in random() cursor")
}
fmt.Println("Result: ", result)
}
if checkForVideo(result) {
randomMessage(s)
}
retMessage := craftReturnMessage(result)
jsonResult, _ := json.Marshal(retMessage)
w.Write(jsonResult)
fmt.Println("End randomMessage")
})
}
func createEmptyServerResponseWithError(err ErrorCode) ServerResponse {
return ServerResponse{
Error: err,
MessageResults: Messages{},
LastID: ""}
}
// First string = sender
// Second string = startingId (if any)
// If ServerResponse != nil -> Return it, because we have an error
func getPagedQueryTerms(r *http.Request) (string, string, ServerResponse) {
query := r.URL.Query()
if len(query) == 0 {
responseObject := createEmptyServerResponseWithError(MalformedPagedBySenderURL)
return "", "", responseObject
}
senderQ := query["sender"]
if len(senderQ) == 0 {
responseObject := createEmptyServerResponseWithError(SenderEmpty)
return "", "", responseObject
}
sender := senderQ[0]
if sender == "" {
responseObject := createEmptyServerResponseWithError(SenderEmpty)
return "", "", responseObject
}
startingIdQ := query["startAt"]
var startingId string
if len(startingIdQ) == 0 {
startingId = ""
} else {
startingId = startingIdQ[0]
}
return sender, startingId, ServerResponse{}
}
func encryptLastId(lastId string) string {
fmt.Println("Beginning encryptLastId()")
// Generate AES cipher with 32 byte passphrase
aesCipher, err := aes.NewCipher([]byte(KeyPassPhrase))
if err != nil {
fmt.Println("Error in encryptLastId(): ", err)
}
// GCM "Galois/Counter Mode": Symmetric Keyy cryptographic block cipher
gcm, err := cipher.NewGCM(aesCipher)
if err != nil {
fmt.Println("Error in encryptLastId(): ", err)
}
// Nonce is literally a "one off" byte array which will be populated by a random sequence below.
// The nonce is prepended/appended to the cipher (?) and is used in deciphering
nonce := make([]byte, gcm.NonceSize())
if _, err = io.ReadFull(rand.Reader, nonce); err != nil {
fmt.Println("Error in io.ReadFull: ", err)
}
encryptedByteArray := gcm.Seal(nonce, nonce, []byte(lastId), nil)
// Convert to Base64 to ensure we can transmit via HTTP without error or corruption
encryptedString := base64.StdEncoding.EncodeToString(encryptedByteArray)
fmt.Println("Ending encryptLastId()")
return encryptedString
}
func decryptLastId(encLastId string) string {
fmt.Println("Beginning decryptLastId()")
encLastIdByteArray, err := base64.StdEncoding.DecodeString(encLastId)
if err != nil {
fmt.Println("Error in StdEncoding.DecodeString: ", err)
}
aesCipher, err := aes.NewCipher([]byte(KeyPassPhrase))
if err != nil {
fmt.Println("Error in decryptLastId(): ", err)
}
gcm, err := cipher.NewGCM(aesCipher)
if err != nil |
nonceSize := gcm.NonceSize()
nonce, cipherText := encLastIdByteArray[:nonceSize], encLastIdByteArray[nonceSize:]
decryptedLastId, err := gcm.Open(nil, []byte(nonce), []byte(cipherText), nil)
if err != nil {
fmt.Println("Error in gcm.Open: ", err)
}
fmt.Println("Ending decryptLastId()")
return string(decryptedLastId)
}
func pagedMessagesLogic(s *Server, r *http.Request) ServerResponse {
fmt.Println("Begin pagedMessagesBySender()")
maxItems := 10
sender, startingId, err := getPagedQueryTerms(r)
if err.Error != "" {
return err
}
fmt.Println("StartingID: ", startingId)
fmt.Println("Sender: ", sender)
// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}}
// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159}
// pipeline := []bson.D{bson.D{{"$match", {bson.D{{"sender", sender}}}}}, bson.D{{"$limit", maxItems}}}
// pipeline := []bson.M{bson.M{"$match": bson.M{"sender": sender, "_id": bson.M{"$gt": startingId}}}, bson.M{"$limit": maxItems}}
pipeline := pagedPipelineBuilder(sender, startingId, maxItems)
cursor, _ := s.col.Aggregate(context.Background(), pipeline)
var messageBatch Messages
var result Message
var rawId bson.RawValue
for cursor.Next(context.Background()) {
cursorErr := cursor.Decode(&result)
if cursorErr != nil {
log.Fatal("Error in pagedMessagesBySender() cursor: ", cursorErr)
}
messageBatch.Messages = append(messageBatch.Messages, result)
rawId = cursor.Current.Lookup("_id")
}
lastId := stringFromRawValue(rawId)
encryptedLastId := encryptLastId(lastId)
serverResponse := ServerResponse{
MessageResults: messageBatch,
Error: "",
LastID: encryptedLastId}
return serverResponse
}
func stringFromRawValue(rawId bson.RawValue) string {
objectID := rawId.ObjectID().String()
lastId := strings.Split(objectID, "\"")
return lastId[1]
}
func pagedPipelineBuilder(sender string, startingId string, limit int) []bson.M {
//pipeline := []bson.M{bson.M{"$match": bson.M{"sender": sender, "_id": bson.M{"$gt": startingId}}}, bson.M{"$limit": maxItems}}
matchElement := matchPipelineBuilder(sender, startingId)
limitElement := bson.M{"$limit": limit}
pipeline := []bson.M{matchElement, limitElement}
return pipeline
}
func matchPipelineBuilder(sender string, startingId string) bson.M {
matchRoot := bson.M{"$match": ""}
senderElement := bson.M{"sender": sender}
idElement := bson.M{"_id": ""}
gtElement := bson.M{"$gt": ""}
if startingId == "" {
matchRoot["$match"] = senderElement
} else {
gtElement["$gt"] = startingId
idElement["_id"] = gtElement
tempArray := []bson.M{senderElement, idElement}
matchRoot["$match"] = tempArray
}
fmt.Println("REturning matchroot: ", matchRoot)
return matchRoot
}
func pagedMessagesBySender(s *Server) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
returnObject := pagedMessagesLogic(s, r)
returnJson, err := json.Marshal(returnObject)
if err != nil {
fmt.Println("Error converted pagedMessagesLogic() response to JSON: ", err)
}
w.Write(returnJson)
fmt.Println("End pagedMessagesBySender()")
})
}
func craftReturnMessage(objIn Message) ReturnMessage {
objIn.Photos = handleMediaPath(objIn.Photos)
newMessage := ReturnMessage{
Sender: objIn.Sender,
Content: objIn.Content,
Timestamp: objIn.Timestamp,
Share: objIn.Share,
Reactions: objIn.Reactions,
}
if len(objIn.Photos) > 0 {
newMessage.Photo = objIn.Photos[0]
}
return newMessage
}
func capitalizeName(name string) string {
return strings.Title(name)
}
func checkForVideo(obj Message) bool {
fmt.Println("in check for video")
if obj.Photos == nil {
return false
}
path := obj.Photos[0].Uri
ext := ".mp4"
fmt.Println("Path: ", path)
fmt.Println(strings.Contains(path, ext))
return strings.Contains(path, ext)
}
func handleMediaPath(origPhotos []Photo) []Photo {
if origPhotos == nil {
return origPhotos
}
if origPhotos[0].Uri == "" {
return origPhotos
}
path := &origPhotos[0].Uri
videos := "/videos | {
fmt.Println("Error in encryptLastId(): ", err)
} | conditional_block |
main.go | // Construct aggregation "pipeline" to return 1 random document from entire collection
pipeline := []bson.D{bson.D{{"$sample", bson.D{{"size", 1}}}}}
cursor, _ := s.col.Aggregate(context.Background(), pipeline)
var result Message
for cursor.Next(context.Background()) {
cursorErr := cursor.Decode(&result)
if cursorErr != nil {
log.Fatal("Error in random() cursor")
}
fmt.Println("Result: ", result)
}
if checkForVideo(result) {
randomMessage(s)
}
retMessage := craftReturnMessage(result)
jsonResult, _ := json.Marshal(retMessage)
w.Write(jsonResult)
fmt.Println("End randomMessage")
})
}
func createEmptyServerResponseWithError(err ErrorCode) ServerResponse {
return ServerResponse{
Error: err,
MessageResults: Messages{},
LastID: ""}
}
// First string = sender
// Second string = startingId (if any)
// If ServerResponse != nil -> Return it, because we have an error
func getPagedQueryTerms(r *http.Request) (string, string, ServerResponse) {
query := r.URL.Query()
if len(query) == 0 {
responseObject := createEmptyServerResponseWithError(MalformedPagedBySenderURL)
return "", "", responseObject
}
senderQ := query["sender"]
if len(senderQ) == 0 {
responseObject := createEmptyServerResponseWithError(SenderEmpty)
return "", "", responseObject
}
sender := senderQ[0]
if sender == "" {
responseObject := createEmptyServerResponseWithError(SenderEmpty)
return "", "", responseObject
}
startingIdQ := query["startAt"]
var startingId string
if len(startingIdQ) == 0 {
startingId = ""
} else {
startingId = startingIdQ[0]
}
return sender, startingId, ServerResponse{}
}
func encryptLastId(lastId string) string {
fmt.Println("Beginning encryptLastId()")
// Generate AES cipher with 32 byte passphrase
aesCipher, err := aes.NewCipher([]byte(KeyPassPhrase))
if err != nil {
fmt.Println("Error in encryptLastId(): ", err)
}
// GCM "Galois/Counter Mode": Symmetric Keyy cryptographic block cipher
gcm, err := cipher.NewGCM(aesCipher)
if err != nil {
fmt.Println("Error in encryptLastId(): ", err)
}
// Nonce is literally a "one off" byte array which will be populated by a random sequence below.
// The nonce is prepended/appended to the cipher (?) and is used in deciphering
nonce := make([]byte, gcm.NonceSize())
if _, err = io.ReadFull(rand.Reader, nonce); err != nil {
fmt.Println("Error in io.ReadFull: ", err)
}
encryptedByteArray := gcm.Seal(nonce, nonce, []byte(lastId), nil)
// Convert to Base64 to ensure we can transmit via HTTP without error or corruption
encryptedString := base64.StdEncoding.EncodeToString(encryptedByteArray)
fmt.Println("Ending encryptLastId()")
return encryptedString
}
func decryptLastId(encLastId string) string {
fmt.Println("Beginning decryptLastId()")
encLastIdByteArray, err := base64.StdEncoding.DecodeString(encLastId)
if err != nil {
fmt.Println("Error in StdEncoding.DecodeString: ", err)
}
aesCipher, err := aes.NewCipher([]byte(KeyPassPhrase))
if err != nil {
fmt.Println("Error in decryptLastId(): ", err)
}
gcm, err := cipher.NewGCM(aesCipher)
if err != nil {
fmt.Println("Error in encryptLastId(): ", err)
}
nonceSize := gcm.NonceSize()
nonce, cipherText := encLastIdByteArray[:nonceSize], encLastIdByteArray[nonceSize:]
decryptedLastId, err := gcm.Open(nil, []byte(nonce), []byte(cipherText), nil)
if err != nil {
fmt.Println("Error in gcm.Open: ", err)
}
fmt.Println("Ending decryptLastId()")
return string(decryptedLastId)
}
func pagedMessagesLogic(s *Server, r *http.Request) ServerResponse {
fmt.Println("Begin pagedMessagesBySender()")
maxItems := 10
sender, startingId, err := getPagedQueryTerms(r)
if err.Error != "" {
return err
}
fmt.Println("StartingID: ", startingId)
fmt.Println("Sender: ", sender)
// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}}
// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159}
// pipeline := []bson.D{bson.D{{"$match", {bson.D{{"sender", sender}}}}}, bson.D{{"$limit", maxItems}}}
// pipeline := []bson.M{bson.M{"$match": bson.M{"sender": sender, "_id": bson.M{"$gt": startingId}}}, bson.M{"$limit": maxItems}}
pipeline := pagedPipelineBuilder(sender, startingId, maxItems)
cursor, _ := s.col.Aggregate(context.Background(), pipeline)
var messageBatch Messages
var result Message
var rawId bson.RawValue
for cursor.Next(context.Background()) {
cursorErr := cursor.Decode(&result)
if cursorErr != nil {
log.Fatal("Error in pagedMessagesBySender() cursor: ", cursorErr)
}
messageBatch.Messages = append(messageBatch.Messages, result)
rawId = cursor.Current.Lookup("_id")
}
lastId := stringFromRawValue(rawId)
encryptedLastId := encryptLastId(lastId)
serverResponse := ServerResponse{
MessageResults: messageBatch,
Error: "",
LastID: encryptedLastId}
return serverResponse
}
func stringFromRawValue(rawId bson.RawValue) string {
objectID := rawId.ObjectID().String()
lastId := strings.Split(objectID, "\"")
return lastId[1]
}
func pagedPipelineBuilder(sender string, startingId string, limit int) []bson.M {
//pipeline := []bson.M{bson.M{"$match": bson.M{"sender": sender, "_id": bson.M{"$gt": startingId}}}, bson.M{"$limit": maxItems}}
matchElement := matchPipelineBuilder(sender, startingId)
limitElement := bson.M{"$limit": limit}
pipeline := []bson.M{matchElement, limitElement}
return pipeline
}
func matchPipelineBuilder(sender string, startingId string) bson.M {
matchRoot := bson.M{"$match": ""}
senderElement := bson.M{"sender": sender}
idElement := bson.M{"_id": ""}
gtElement := bson.M{"$gt": ""}
if startingId == "" {
matchRoot["$match"] = senderElement
} else {
gtElement["$gt"] = startingId
idElement["_id"] = gtElement
tempArray := []bson.M{senderElement, idElement}
matchRoot["$match"] = tempArray
}
fmt.Println("REturning matchroot: ", matchRoot)
return matchRoot
}
func pagedMessagesBySender(s *Server) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
returnObject := pagedMessagesLogic(s, r)
returnJson, err := json.Marshal(returnObject)
if err != nil {
fmt.Println("Error converted pagedMessagesLogic() response to JSON: ", err)
}
w.Write(returnJson)
fmt.Println("End pagedMessagesBySender()")
})
}
func craftReturnMessage(objIn Message) ReturnMessage {
objIn.Photos = handleMediaPath(objIn.Photos)
newMessage := ReturnMessage{
Sender: objIn.Sender,
Content: objIn.Content,
Timestamp: objIn.Timestamp,
Share: objIn.Share,
Reactions: objIn.Reactions,
}
if len(objIn.Photos) > 0 {
newMessage.Photo = objIn.Photos[0]
}
return newMessage
}
func capitalizeName(name string) string {
return strings.Title(name)
}
func checkForVideo(obj Message) bool {
fmt.Println("in check for video")
if obj.Photos == nil {
return false
}
path := obj.Photos[0].Uri
ext := ".mp4"
fmt.Println("Path: ", path)
fmt.Println(strings.Contains(path, ext))
return strings.Contains(path, ext)
}
func handleMediaPath(origPhotos []Photo) []Photo {
if origPhotos == nil {
return origPhotos
}
if origPhotos[0].Uri == "" {
return origPhotos
}
path := &origPhotos[0].Uri
videos := "/videos/"
photos := "/photos/"
gifs := "/gifs/"
if strings.Contains(*path, videos) {
*path = stripVideoPath(*path)
}
if strings.Contains(*path, photos) {
*path = stripPhotoPath(*path)
}
if strings.Contains(*path, gifs) {
*path = stripGifPath(*path)
}
return origPhotos
}
func stripVideoPath(path string) string {
videoIndex := strings.Index(path, "/videos/")
return path[videoIndex:]
}
func | stripPhotoPath | identifier_name | |
main.go | {{"$sample", bson.D{{"size", 1}}}}}
cursor, _ := s.col.Aggregate(context.Background(), pipeline)
var result Message
for cursor.Next(context.Background()) {
cursorErr := cursor.Decode(&result)
if cursorErr != nil {
log.Fatal("Error in random() cursor")
}
fmt.Println("Result: ", result)
}
if checkForVideo(result) {
randomMessage(s)
}
retMessage := craftReturnMessage(result)
jsonResult, _ := json.Marshal(retMessage)
w.Write(jsonResult)
fmt.Println("End randomMessage")
})
}
func createEmptyServerResponseWithError(err ErrorCode) ServerResponse {
return ServerResponse{
Error: err,
MessageResults: Messages{},
LastID: ""}
}
// First string = sender
// Second string = startingId (if any)
// If ServerResponse != nil -> Return it, because we have an error
func getPagedQueryTerms(r *http.Request) (string, string, ServerResponse) {
query := r.URL.Query()
if len(query) == 0 {
responseObject := createEmptyServerResponseWithError(MalformedPagedBySenderURL)
return "", "", responseObject
}
senderQ := query["sender"]
if len(senderQ) == 0 {
responseObject := createEmptyServerResponseWithError(SenderEmpty)
return "", "", responseObject
}
sender := senderQ[0]
if sender == "" {
responseObject := createEmptyServerResponseWithError(SenderEmpty)
return "", "", responseObject
}
startingIdQ := query["startAt"]
var startingId string
if len(startingIdQ) == 0 {
startingId = ""
} else {
startingId = startingIdQ[0]
}
return sender, startingId, ServerResponse{}
}
func encryptLastId(lastId string) string {
fmt.Println("Beginning encryptLastId()")
// Generate AES cipher with 32 byte passphrase
aesCipher, err := aes.NewCipher([]byte(KeyPassPhrase))
if err != nil {
fmt.Println("Error in encryptLastId(): ", err)
}
// GCM "Galois/Counter Mode": Symmetric Keyy cryptographic block cipher
gcm, err := cipher.NewGCM(aesCipher)
if err != nil {
fmt.Println("Error in encryptLastId(): ", err)
}
// Nonce is literally a "one off" byte array which will be populated by a random sequence below.
// The nonce is prepended/appended to the cipher (?) and is used in deciphering
nonce := make([]byte, gcm.NonceSize())
if _, err = io.ReadFull(rand.Reader, nonce); err != nil {
fmt.Println("Error in io.ReadFull: ", err)
}
encryptedByteArray := gcm.Seal(nonce, nonce, []byte(lastId), nil)
// Convert to Base64 to ensure we can transmit via HTTP without error or corruption
encryptedString := base64.StdEncoding.EncodeToString(encryptedByteArray)
fmt.Println("Ending encryptLastId()")
return encryptedString
}
func decryptLastId(encLastId string) string {
fmt.Println("Beginning decryptLastId()")
encLastIdByteArray, err := base64.StdEncoding.DecodeString(encLastId)
if err != nil {
fmt.Println("Error in StdEncoding.DecodeString: ", err)
}
aesCipher, err := aes.NewCipher([]byte(KeyPassPhrase))
if err != nil {
fmt.Println("Error in decryptLastId(): ", err)
}
gcm, err := cipher.NewGCM(aesCipher)
if err != nil {
fmt.Println("Error in encryptLastId(): ", err)
}
nonceSize := gcm.NonceSize()
nonce, cipherText := encLastIdByteArray[:nonceSize], encLastIdByteArray[nonceSize:]
decryptedLastId, err := gcm.Open(nil, []byte(nonce), []byte(cipherText), nil)
if err != nil {
fmt.Println("Error in gcm.Open: ", err)
}
fmt.Println("Ending decryptLastId()")
return string(decryptedLastId)
}
func pagedMessagesLogic(s *Server, r *http.Request) ServerResponse {
fmt.Println("Begin pagedMessagesBySender()")
maxItems := 10
sender, startingId, err := getPagedQueryTerms(r)
if err.Error != "" {
return err
}
fmt.Println("StartingID: ", startingId)
fmt.Println("Sender: ", sender)
// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}}
// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159}
// pipeline := []bson.D{bson.D{{"$match", {bson.D{{"sender", sender}}}}}, bson.D{{"$limit", maxItems}}}
// pipeline := []bson.M{bson.M{"$match": bson.M{"sender": sender, "_id": bson.M{"$gt": startingId}}}, bson.M{"$limit": maxItems}}
pipeline := pagedPipelineBuilder(sender, startingId, maxItems)
cursor, _ := s.col.Aggregate(context.Background(), pipeline)
var messageBatch Messages
var result Message
var rawId bson.RawValue
for cursor.Next(context.Background()) {
cursorErr := cursor.Decode(&result)
if cursorErr != nil {
log.Fatal("Error in pagedMessagesBySender() cursor: ", cursorErr)
}
messageBatch.Messages = append(messageBatch.Messages, result)
rawId = cursor.Current.Lookup("_id")
}
lastId := stringFromRawValue(rawId)
encryptedLastId := encryptLastId(lastId)
serverResponse := ServerResponse{
MessageResults: messageBatch,
Error: "",
LastID: encryptedLastId}
return serverResponse
}
func stringFromRawValue(rawId bson.RawValue) string {
objectID := rawId.ObjectID().String()
lastId := strings.Split(objectID, "\"")
return lastId[1]
}
func pagedPipelineBuilder(sender string, startingId string, limit int) []bson.M {
//pipeline := []bson.M{bson.M{"$match": bson.M{"sender": sender, "_id": bson.M{"$gt": startingId}}}, bson.M{"$limit": maxItems}}
matchElement := matchPipelineBuilder(sender, startingId)
limitElement := bson.M{"$limit": limit}
pipeline := []bson.M{matchElement, limitElement}
return pipeline
}
func matchPipelineBuilder(sender string, startingId string) bson.M {
matchRoot := bson.M{"$match": ""}
senderElement := bson.M{"sender": sender}
idElement := bson.M{"_id": ""}
gtElement := bson.M{"$gt": ""}
if startingId == "" {
matchRoot["$match"] = senderElement
} else {
gtElement["$gt"] = startingId
idElement["_id"] = gtElement
tempArray := []bson.M{senderElement, idElement}
matchRoot["$match"] = tempArray
}
fmt.Println("REturning matchroot: ", matchRoot)
return matchRoot
}
func pagedMessagesBySender(s *Server) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
returnObject := pagedMessagesLogic(s, r)
returnJson, err := json.Marshal(returnObject)
if err != nil {
fmt.Println("Error converted pagedMessagesLogic() response to JSON: ", err)
}
w.Write(returnJson)
fmt.Println("End pagedMessagesBySender()")
})
}
func craftReturnMessage(objIn Message) ReturnMessage {
objIn.Photos = handleMediaPath(objIn.Photos)
newMessage := ReturnMessage{
Sender: objIn.Sender,
Content: objIn.Content,
Timestamp: objIn.Timestamp,
Share: objIn.Share,
Reactions: objIn.Reactions,
}
if len(objIn.Photos) > 0 {
newMessage.Photo = objIn.Photos[0]
}
return newMessage
}
func capitalizeName(name string) string {
return strings.Title(name)
}
func checkForVideo(obj Message) bool {
fmt.Println("in check for video")
if obj.Photos == nil {
return false
}
path := obj.Photos[0].Uri
ext := ".mp4"
fmt.Println("Path: ", path)
fmt.Println(strings.Contains(path, ext))
return strings.Contains(path, ext)
}
func handleMediaPath(origPhotos []Photo) []Photo {
if origPhotos == nil {
return origPhotos
}
if origPhotos[0].Uri == "" {
return origPhotos
}
path := &origPhotos[0].Uri
videos := "/videos/"
photos := "/photos/"
gifs := "/gifs/"
if strings.Contains(*path, videos) {
*path = stripVideoPath(*path)
}
if strings.Contains(*path, photos) {
*path = stripPhotoPath(*path)
}
if strings.Contains(*path, gifs) {
*path = stripGifPath(*path)
}
return origPhotos
}
func stripVideoPath(path string) string {
videoIndex := strings.Index(path, "/videos/")
return path[videoIndex:]
}
func stripPhotoPath(path string) string | {
splitString := strings.SplitAfter(path, "/photos/")
return splitString[len(splitString)-1]
} | identifier_body | |
main.go | "(.*?)"`)
func reformatObjectId(objectId string) string {
fmt.Println("objectID passed in: ", objectId)
var idStringBeginning = "ObjectId("
var idStringEnd = ")"
id := ObjectIdRegEx.FindString(objectId)
if id == "" {
fmt.Println("Error in reformatObjectId")
return ""
}
return idStringBeginning + id + idStringEnd
}
func randomMessage(s *Server) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Println("Begin randomMessage")
// Construct aggregation "pipeline" to return 1 random document from entire collection
pipeline := []bson.D{bson.D{{"$sample", bson.D{{"size", 1}}}}}
cursor, _ := s.col.Aggregate(context.Background(), pipeline)
var result Message
for cursor.Next(context.Background()) {
cursorErr := cursor.Decode(&result)
if cursorErr != nil {
log.Fatal("Error in random() cursor")
}
fmt.Println("Result: ", result)
}
if checkForVideo(result) {
randomMessage(s)
}
retMessage := craftReturnMessage(result)
jsonResult, _ := json.Marshal(retMessage)
w.Write(jsonResult)
fmt.Println("End randomMessage")
})
}
func createEmptyServerResponseWithError(err ErrorCode) ServerResponse {
return ServerResponse{
Error: err,
MessageResults: Messages{},
LastID: ""}
}
// First string = sender
// Second string = startingId (if any)
// If ServerResponse != nil -> Return it, because we have an error
func getPagedQueryTerms(r *http.Request) (string, string, ServerResponse) {
query := r.URL.Query()
if len(query) == 0 {
responseObject := createEmptyServerResponseWithError(MalformedPagedBySenderURL)
return "", "", responseObject
}
senderQ := query["sender"] | }
sender := senderQ[0]
if sender == "" {
responseObject := createEmptyServerResponseWithError(SenderEmpty)
return "", "", responseObject
}
startingIdQ := query["startAt"]
var startingId string
if len(startingIdQ) == 0 {
startingId = ""
} else {
startingId = startingIdQ[0]
}
return sender, startingId, ServerResponse{}
}
func encryptLastId(lastId string) string {
fmt.Println("Beginning encryptLastId()")
// Generate AES cipher with 32 byte passphrase
aesCipher, err := aes.NewCipher([]byte(KeyPassPhrase))
if err != nil {
fmt.Println("Error in encryptLastId(): ", err)
}
// GCM "Galois/Counter Mode": Symmetric Keyy cryptographic block cipher
gcm, err := cipher.NewGCM(aesCipher)
if err != nil {
fmt.Println("Error in encryptLastId(): ", err)
}
// Nonce is literally a "one off" byte array which will be populated by a random sequence below.
// The nonce is prepended/appended to the cipher (?) and is used in deciphering
nonce := make([]byte, gcm.NonceSize())
if _, err = io.ReadFull(rand.Reader, nonce); err != nil {
fmt.Println("Error in io.ReadFull: ", err)
}
encryptedByteArray := gcm.Seal(nonce, nonce, []byte(lastId), nil)
// Convert to Base64 to ensure we can transmit via HTTP without error or corruption
encryptedString := base64.StdEncoding.EncodeToString(encryptedByteArray)
fmt.Println("Ending encryptLastId()")
return encryptedString
}
func decryptLastId(encLastId string) string {
fmt.Println("Beginning decryptLastId()")
encLastIdByteArray, err := base64.StdEncoding.DecodeString(encLastId)
if err != nil {
fmt.Println("Error in StdEncoding.DecodeString: ", err)
}
aesCipher, err := aes.NewCipher([]byte(KeyPassPhrase))
if err != nil {
fmt.Println("Error in decryptLastId(): ", err)
}
gcm, err := cipher.NewGCM(aesCipher)
if err != nil {
fmt.Println("Error in encryptLastId(): ", err)
}
nonceSize := gcm.NonceSize()
nonce, cipherText := encLastIdByteArray[:nonceSize], encLastIdByteArray[nonceSize:]
decryptedLastId, err := gcm.Open(nil, []byte(nonce), []byte(cipherText), nil)
if err != nil {
fmt.Println("Error in gcm.Open: ", err)
}
fmt.Println("Ending decryptLastId()")
return string(decryptedLastId)
}
func pagedMessagesLogic(s *Server, r *http.Request) ServerResponse {
fmt.Println("Begin pagedMessagesBySender()")
maxItems := 10
sender, startingId, err := getPagedQueryTerms(r)
if err.Error != "" {
return err
}
fmt.Println("StartingID: ", startingId)
fmt.Println("Sender: ", sender)
// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}}
// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159}
// pipeline := []bson.D{bson.D{{"$match", {bson.D{{"sender", sender}}}}}, bson.D{{"$limit", maxItems}}}
// pipeline := []bson.M{bson.M{"$match": bson.M{"sender": sender, "_id": bson.M{"$gt": startingId}}}, bson.M{"$limit": maxItems}}
pipeline := pagedPipelineBuilder(sender, startingId, maxItems)
cursor, _ := s.col.Aggregate(context.Background(), pipeline)
var messageBatch Messages
var result Message
var rawId bson.RawValue
for cursor.Next(context.Background()) {
cursorErr := cursor.Decode(&result)
if cursorErr != nil {
log.Fatal("Error in pagedMessagesBySender() cursor: ", cursorErr)
}
messageBatch.Messages = append(messageBatch.Messages, result)
rawId = cursor.Current.Lookup("_id")
}
lastId := stringFromRawValue(rawId)
encryptedLastId := encryptLastId(lastId)
serverResponse := ServerResponse{
MessageResults: messageBatch,
Error: "",
LastID: encryptedLastId}
return serverResponse
}
func stringFromRawValue(rawId bson.RawValue) string {
objectID := rawId.ObjectID().String()
lastId := strings.Split(objectID, "\"")
return lastId[1]
}
func pagedPipelineBuilder(sender string, startingId string, limit int) []bson.M {
//pipeline := []bson.M{bson.M{"$match": bson.M{"sender": sender, "_id": bson.M{"$gt": startingId}}}, bson.M{"$limit": maxItems}}
matchElement := matchPipelineBuilder(sender, startingId)
limitElement := bson.M{"$limit": limit}
pipeline := []bson.M{matchElement, limitElement}
return pipeline
}
func matchPipelineBuilder(sender string, startingId string) bson.M {
matchRoot := bson.M{"$match": ""}
senderElement := bson.M{"sender": sender}
idElement := bson.M{"_id": ""}
gtElement := bson.M{"$gt": ""}
if startingId == "" {
matchRoot["$match"] = senderElement
} else {
gtElement["$gt"] = startingId
idElement["_id"] = gtElement
tempArray := []bson.M{senderElement, idElement}
matchRoot["$match"] = tempArray
}
fmt.Println("REturning matchroot: ", matchRoot)
return matchRoot
}
func pagedMessagesBySender(s *Server) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
returnObject := pagedMessagesLogic(s, r)
returnJson, err := json.Marshal(returnObject)
if err != nil {
fmt.Println("Error converted pagedMessagesLogic() response to JSON: ", err)
}
w.Write(returnJson)
fmt.Println("End pagedMessagesBySender()")
})
}
func craftReturnMessage(objIn Message) ReturnMessage {
objIn.Photos = handleMediaPath(objIn.Photos)
newMessage := ReturnMessage{
Sender: objIn.Sender,
Content: objIn.Content,
Timestamp: objIn.Timestamp,
Share: objIn.Share,
Reactions: objIn.Reactions,
}
if len(objIn.Photos) > 0 {
newMessage.Photo = objIn.Photos[0]
}
return newMessage
}
func capitalizeName(name string) string {
return strings.Title(name)
}
func checkForVideo(obj Message) bool {
fmt.Println("in check for video")
if obj.Photos == nil {
return false
}
path := obj.Photos[0].Uri
ext := ".mp4"
fmt.Println("Path: ", path)
fmt.Println(strings.Contains(path, ext))
return strings.Contains(path, ext)
}
func handleMediaPath(origPhotos []Photo) []Photo {
if origPhotos == nil {
return origPhotos
}
if origPhotos[0].Uri == "" {
return origPhotos
}
path := &origPhotos[0].Uri
videos := "/videos/"
| if len(senderQ) == 0 {
responseObject := createEmptyServerResponseWithError(SenderEmpty)
return "", "", responseObject | random_line_split |
clientlib-promoengine.js | ("/content/vmware/vmware-published-sites")>-1){i=i.replace("/content/vmware/vmware-published-sites/","");
c=i.split("/")[0]
}else{if(h.indexOf("/content/vmware/vmware-preview-sites")>-1){i=h.replace("/content/vmware/vmware-preview-sites/","");
redirect_locale=i.split("/")[0]
}else{if(h.indexOf("/content/vmware/vmware-published-sites")>-1){i=h.replace("/content/vmware/vmware-published-sites/","");
redirect_locale=i.split("/")[0]
}}c=d.split("/")[1];
d=h.replace(redirect_locale,c)
}}}if(d.indexOf("/content/vmware/vmware-preview-sites")>-1){d=d.replace("/content/vmware/vmware-preview-sites","/content/vmware/vmware-published-sites")
}if(g.length>0){$.ajax({url:"/bin/vmware/promotionalcontent",type:"Get",async:true,data:{path:d,promopositionArray:JSON.stringify(g),currentDate:a(),promoPath:b,position:e,preview:f,locale:c}}).done(function(j){if(j.PromoJSon!=undefined){var k=j.PromoJSon;
$("body").find(".hcontentcard.parbase").each(function(){var o=$(this);
o.find(".thumb-container").removeAttr("style");
var n=$(this).find("input#promotionalContent").val();
var l=$(this).find("input#promoposition").val();
var m=$(this).find("input#templateName").val();
if(n==="true"&&promoposition!=""&&promoposition!=undefined){$.each(k,function(A,S){var q=S.isValidPromo;
var J=S.promoPosition!=undefined?S.promoPosition:"";
if(q&&J===l){var t=S.hamBurgerMenu!=undefined?S.hamBurgerMenu:"";
var ac=S.ctaPath!=undefined?S.ctaPath:"";
var D=S.ctaLabel!=undefined?S.ctaLabel:"";
var L=S.ctaLinkTitle!=undefined?S.ctaLinkTitle:"";
var O=S.date!=undefined?S.date:"";
var E=S.promoTitle!=undefined?S.promoTitle:"";
var V=S.promoContent!=undefined?S.promoContent:"";
var w=S.bcvtrue!=undefined?S.bcvtrue:"";
var aa=S.iconval!=undefined?S.iconval:"";
var u=false;
var M=S.altText!=undefined?S.altText:"";
var y=S.playicon?S.playicon:"";
var U=S.windowSelection!=undefined?S.windowSelection:"";
var I=S.zoomIcon!=undefined?S.zoomIcon:"";
var Z=S.largeImageLink!=undefined?S.largeImageLink:"";
var ab=S.imagePath!=undefined?S.imagePath:"";
var B=S.bcvid!=undefined?S.bcvid:"";
var W=S.expandImageCheckbox!=undefined?S.expandImageCheckbox:"";
var P=S.bcduration!=undefined?S.bcduration:"";
var ad=S.twitter!=undefined?S.twitter:"";
var N=S.linkedin!=undefined?S.linkedin:"";
var T=S.googleplus!=undefined?S.googleplus:"";
var Q=S.facebook!=undefined?S.facebook:"";
var G=S.props!=undefined?S.props:"";
var r=S.updatedbody!=undefined?S.updatedbody:"";
var z=S.description!=undefined?S.description:"";
var X=$(o).find(".section-custom").attr("id")!=undefined?$(o).find(".section-custom").attr("id"):"";
var x=$(o).find("#divId").val()!=undefined?$(o).find("#divId").val():"";
if(t===""){t="true"
}var R="";
if(t!=true&&ac===""&&O===""&&(E===""||V==="")){R='<div class="thumb-container withoutHamModule">'
}else{R='<div class="thumb-container">'
}if(w==="false"){if(aa==="fa fa-youtube"){u=true;
var C='<div class="thumb-img alt-background" id="'+x+'" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>";
if(!(y==="false")){var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/play_icon.png" alt="Video Play Icon" /></a>'
}else{var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/dark-play-icon.png" alt="Video Play Icon" /></a>'
}}else{if(aa==="fa fa-video-camera"){var C='<div class="thumb-img alt-background" id="'+x+'" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>";
if(!(y==="false")){var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/play_icon.png" alt="Video Play Icon" /></a>'
}else{var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/dark-play-icon.png" alt="Video Play Icon" /></a>'
}}else{var C='<div class="thumb-img alt-background" id="'+x+'" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>";
if(W=="true"){if(!(I==="false")){C+='<a onclick="return ImageLargeView(this);" largeimagename='+Z+" altText="+M+' href="javascript:void(0)"><i class="fa fa-search-plus img-largeView" aria-hidden="true"></i></a>'
}else{C+='<a onclick="return ImageLargeView(this);" largeimagename='+Z+" altText="+M+' href="javascript:void(0)"><i class="fa fa-search-plus img-largeView dark" aria-hidden="true"></i></a>'
}}}}C+="</div>"
}else{var C='<div class="thumb-img alt-background" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>";
if(B!=null&&(B!="")){if(!(y==="false")){C+=" <a asset-id="+B+' data-element-type="video" href="javascript:void(0);" onclick="return showVideo('+B+');"><img src="/content/dam/digitalmarketing/vmware/global-icons/play_icon.png" alt="Video Play Icon" /></a>'
}else{C+=" <a "+G+" asset-id="+B+' data-element-type="video" href="javascript:void(0);" onclick="return showVideo('+B+');"><img src="/content/dam/digitalmarketing/vmware/global-icons/dark-play-icon.png" alt="Video Play Icon" /></a>'
}}C+="</div>"
}var s='<div class="thumb-details">';
if(w!="false"&&B!=""&&P!=""){s+='<span class="timestamp">'+P+"</span>"
}s+='<div class="col-xs-1 col-md-1 col-sm-1">';
if(aa==="fa support_ico"){aa="fa support_ico"
}else{aa=aa
}s+='<i class="'+aa+'"></i></div>';
s+='<div class="col-xs-10 col-md-10 col-sm-10">';
if(!(t==="true")){var F="no"
}else{var F=""
}s+='<div class="detail-content '+F+'clamp">';
if(E!=""&&V!=""){s+='<h3 class="'+F+'clampingDetail"><span>'+E+"</span></h3>";
s+=r
}else{if(E!=""){s+='<h3 class="'+F+'clampingDetail"><p>'+E+"</p>"
}else{s+=r
}}s+="</div>";
if(ac!=""&&D!=""||O!=""){s+='<div class="cta_module">';
if(ac!=""&&D!=""){if(w!="false"&&B!=""){s+=' <a class="learn_more" asset-id='+B+' data-element-type="video" href="javascript:void(0);" onclick="return showVideo('+B+');">'+D+'<i class="fa fa-angle-double-right inline"></i></a>'
}else{var p=U=="true"?"_self":"_blank";
s+=' <a class="learn_more" title='+L+" target="+p+" href="+ac+">"+D+'<i class="fa fa-angle-double-right inline"></i></a>'
}}if(O!=""){s+='<span class="datestamp">'+O.split("T")[0]+"</span>"
}s+="</div>"
}s+="</div>";
var v="";
if(t==="true"){if(m!="l4enterprise"){s+='<div class="col-xs-1 col-md-1 col-sm-1 cntClk"><i class="fa fa-bars detail-toggle non-product"></i></div>'
}if(m==="l4enterprise") | {s+='<div class="col-xs-1 col-md-1 col-sm-1 cntClk"><i class="fa fa-plus-square detail-toggle"></i></div>'
} | conditional_block | |
clientlib-promoengine.js | ||c===""){var i=d;
if(i.indexOf("/content/vmware/vmware-preview-sites")>-1){i=i.replace("/content/vmware/vmware-preview-sites/","");
c=i.split("/")[0]
}else{if(i.indexOf("/content/vmware/vmware-published-sites")>-1){i=i.replace("/content/vmware/vmware-published-sites/","");
c=i.split("/")[0]
}else{if(h.indexOf("/content/vmware/vmware-preview-sites")>-1){i=h.replace("/content/vmware/vmware-preview-sites/","");
redirect_locale=i.split("/")[0]
}else{if(h.indexOf("/content/vmware/vmware-published-sites")>-1){i=h.replace("/content/vmware/vmware-published-sites/","");
redirect_locale=i.split("/")[0]
}}c=d.split("/")[1];
d=h.replace(redirect_locale,c)
}}}if(d.indexOf("/content/vmware/vmware-preview-sites")>-1){d=d.replace("/content/vmware/vmware-preview-sites","/content/vmware/vmware-published-sites")
}if(g.length>0){$.ajax({url:"/bin/vmware/promotionalcontent",type:"Get",async:true,data:{path:d,promopositionArray:JSON.stringify(g),currentDate:a(),promoPath:b,position:e,preview:f,locale:c}}).done(function(j){if(j.PromoJSon!=undefined){var k=j.PromoJSon;
$("body").find(".hcontentcard.parbase").each(function(){var o=$(this);
o.find(".thumb-container").removeAttr("style");
var n=$(this).find("input#promotionalContent").val();
var l=$(this).find("input#promoposition").val();
var m=$(this).find("input#templateName").val();
if(n==="true"&&promoposition!=""&&promoposition!=undefined){$.each(k,function(A,S){var q=S.isValidPromo;
var J=S.promoPosition!=undefined?S.promoPosition:"";
if(q&&J===l){var t=S.hamBurgerMenu!=undefined?S.hamBurgerMenu:"";
var ac=S.ctaPath!=undefined?S.ctaPath:"";
var D=S.ctaLabel!=undefined?S.ctaLabel:"";
var L=S.ctaLinkTitle!=undefined?S.ctaLinkTitle:"";
var O=S.date!=undefined?S.date:"";
var E=S.promoTitle!=undefined?S.promoTitle:"";
var V=S.promoContent!=undefined?S.promoContent:"";
var w=S.bcvtrue!=undefined?S.bcvtrue:"";
var aa=S.iconval!=undefined?S.iconval:"";
var u=false;
var M=S.altText!=undefined?S.altText:"";
var y=S.playicon?S.playicon:"";
var U=S.windowSelection!=undefined?S.windowSelection:"";
var I=S.zoomIcon!=undefined?S.zoomIcon:"";
var Z=S.largeImageLink!=undefined?S.largeImageLink:"";
var ab=S.imagePath!=undefined?S.imagePath:"";
var B=S.bcvid!=undefined?S.bcvid:"";
var W=S.expandImageCheckbox!=undefined?S.expandImageCheckbox:"";
var P=S.bcduration!=undefined?S.bcduration:"";
var ad=S.twitter!=undefined?S.twitter:"";
var N=S.linkedin!=undefined?S.linkedin:"";
var T=S.googleplus!=undefined?S.googleplus:"";
var Q=S.facebook!=undefined?S.facebook:"";
var G=S.props!=undefined?S.props:"";
var r=S.updatedbody!=undefined?S.updatedbody:"";
var z=S.description!=undefined?S.description:"";
var X=$(o).find(".section-custom").attr("id")!=undefined?$(o).find(".section-custom").attr("id"):"";
var x=$(o).find("#divId").val()!=undefined?$(o).find("#divId").val():"";
if(t===""){t="true"
}var R="";
if(t!=true&&ac===""&&O===""&&(E===""||V==="")){R='<div class="thumb-container withoutHamModule">'
}else{R='<div class="thumb-container">'
}if(w==="false"){if(aa==="fa fa-youtube"){u=true;
var C='<div class="thumb-img alt-background" id="'+x+'" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>";
if(!(y==="false")){var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/play_icon.png" alt="Video Play Icon" /></a>' | if(!(y==="false")){var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/play_icon.png" alt="Video Play Icon" /></a>'
}else{var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/dark-play-icon.png" alt="Video Play Icon" /></a>'
}}else{var C='<div class="thumb-img alt-background" id="'+x+'" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>";
if(W=="true"){if(!(I==="false")){C+='<a onclick="return ImageLargeView(this);" largeimagename='+Z+" altText="+M+' href="javascript:void(0)"><i class="fa fa-search-plus img-largeView" aria-hidden="true"></i></a>'
}else{C+='<a onclick="return ImageLargeView(this);" largeimagename='+Z+" altText="+M+' href="javascript:void(0)"><i class="fa fa-search-plus img-largeView dark" aria-hidden="true"></i></a>'
}}}}C+="</div>"
}else{var C='<div class="thumb-img alt-background" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>";
if(B!=null&&(B!="")){if(!(y==="false")){C+=" <a asset-id="+B+' data-element-type="video" href="javascript:void(0);" onclick="return showVideo('+B+');"><img src="/content/dam/digitalmarketing/vmware/global-icons/play_icon.png" alt="Video Play Icon" /></a>'
}else{C+=" <a "+G+" asset-id="+B+' data-element-type="video" href="javascript:void(0);" onclick="return showVideo('+B+');"><img src="/content/dam/digitalmarketing/vmware/global-icons/dark-play-icon.png" alt="Video Play Icon" /></a>'
}}C+="</div>"
}var s='<div class="thumb-details">';
if(w!="false"&&B!=""&&P!=""){s+='<span class="timestamp">'+P+"</span>"
}s+='<div class="col-xs-1 col-md-1 col-sm-1">';
if(aa==="fa support_ico"){aa="fa support_ico"
}else{aa=aa
}s+='<i class="'+aa+'"></i></div>';
s+='<div class="col-xs-10 col-md-10 col-sm-10">';
if(!(t==="true")){var F="no"
}else{var F=""
}s+='<div class="detail-content '+F+'clamp">';
if(E!=""&&V!=""){s+='<h3 class="'+F+'clampingDetail"><span>'+E+"</span></h3>";
s+=r
}else{if(E!=""){s+='<h3 class="'+F+'clampingDetail"><p>'+E+"</p>"
}else{s+=r
}}s+="</div>";
if(ac!=""&&D!=""||O!=""){s+='<div class="cta_module">';
if(ac!=""&&D!=""){if(w!="false"&&B!=""){s+=' <a class="learn_more" asset-id='+B+' data-element-type="video" href="javascript:void(0);" onclick="return showVideo('+B+');">'+D+'<i class="fa fa-angle-double-right inline"></i></a>'
}else{var p=U=="true"?"_self":"_blank";
s+=' <a class="learn_more" title='+L+" target="+p+" href="+ac+">"+D+'<i class="fa fa-angle-double-right inline"></i></a>'
}}if(O!=""){s+='<span class="datestamp">'+O.split("T")[0]+"</span>"
}s+="</div>"
}s+="</div>";
var v="";
if(t==="true"){if(m!="l4enterprise"){s+='<div class="col-xs-1 col-md-1 col-sm-1 cntClk"><i class="fa fa | }else{var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/dark-play-icon.png" alt="Video Play Icon" /></a>'
}}else{if(aa==="fa fa-video-camera"){var C='<div class="thumb-img alt-background" id="'+x+'" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>"; | random_line_split |
clientlib-promoengine.js | (b,e,f){function a(){var l=new Date();
var k=l.getTimezoneOffset();
k=(k/60)*-1;
var j=l.getTime();
if(k!==0){return(j+(3600000*k))
}return j
}var d=$('[name="resolvedPath"]').val();
var h=$('[name="pagePath"]').attr("content");
var g=[];
$("body").find(".hcontentcard.parbase").each(function(){var j=$(this).find("input#promotionalContent").val();
var l=$(this).find("input#promoposition").val();
var k=$(this).find("input#IsMbox").val();
if(j==="true"&&l!=""&&l!=undefined){g.push(l)
}});
var c=$('[name="localeVal"]').val();
if(c==="null"||c===""){var i=d;
if(i.indexOf("/content/vmware/vmware-preview-sites")>-1){i=i.replace("/content/vmware/vmware-preview-sites/","");
c=i.split("/")[0]
}else{if(i.indexOf("/content/vmware/vmware-published-sites")>-1){i=i.replace("/content/vmware/vmware-published-sites/","");
c=i.split("/")[0]
}else{if(h.indexOf("/content/vmware/vmware-preview-sites")>-1){i=h.replace("/content/vmware/vmware-preview-sites/","");
redirect_locale=i.split("/")[0]
}else{if(h.indexOf("/content/vmware/vmware-published-sites")>-1){i=h.replace("/content/vmware/vmware-published-sites/","");
redirect_locale=i.split("/")[0]
}}c=d.split("/")[1];
d=h.replace(redirect_locale,c)
}}}if(d.indexOf("/content/vmware/vmware-preview-sites")>-1){d=d.replace("/content/vmware/vmware-preview-sites","/content/vmware/vmware-published-sites")
}if(g.length>0){$.ajax({url:"/bin/vmware/promotionalcontent",type:"Get",async:true,data:{path:d,promopositionArray:JSON.stringify(g),currentDate:a(),promoPath:b,position:e,preview:f,locale:c}}).done(function(j){if(j.PromoJSon!=undefined){var k=j.PromoJSon;
$("body").find(".hcontentcard.parbase").each(function(){var o=$(this);
o.find(".thumb-container").removeAttr("style");
var n=$(this).find("input#promotionalContent").val();
var l=$(this).find("input#promoposition").val();
var m=$(this).find("input#templateName").val();
if(n==="true"&&promoposition!=""&&promoposition!=undefined){$.each(k,function(A,S){var q=S.isValidPromo;
var J=S.promoPosition!=undefined?S.promoPosition:"";
if(q&&J===l){var t=S.hamBurgerMenu!=undefined?S.hamBurgerMenu:"";
var ac=S.ctaPath!=undefined?S.ctaPath:"";
var D=S.ctaLabel!=undefined?S.ctaLabel:"";
var L=S.ctaLinkTitle!=undefined?S.ctaLinkTitle:"";
var O=S.date!=undefined?S.date:"";
var E=S.promoTitle!=undefined?S.promoTitle:"";
var V=S.promoContent!=undefined?S.promoContent:"";
var w=S.bcvtrue!=undefined?S.bcvtrue:"";
var aa=S.iconval!=undefined?S.iconval:"";
var u=false;
var M=S.altText!=undefined?S.altText:"";
var y=S.playicon?S.playicon:"";
var U=S.windowSelection!=undefined?S.windowSelection:"";
var I=S.zoomIcon!=undefined?S.zoomIcon:"";
var Z=S.largeImageLink!=undefined?S.largeImageLink:"";
var ab=S.imagePath!=undefined?S.imagePath:"";
var B=S.bcvid!=undefined?S.bcvid:"";
var W=S.expandImageCheckbox!=undefined?S.expandImageCheckbox:"";
var P=S.bcduration!=undefined?S.bcduration:"";
var ad=S.twitter!=undefined?S.twitter:"";
var N=S.linkedin!=undefined?S.linkedin:"";
var T=S.googleplus!=undefined?S.googleplus:"";
var Q=S.facebook!=undefined?S.facebook:"";
var G=S.props!=undefined?S.props:"";
var r=S.updatedbody!=undefined?S.updatedbody:"";
var z=S.description!=undefined?S.description:"";
var X=$(o).find(".section-custom").attr("id")!=undefined?$(o).find(".section-custom").attr("id"):"";
var x=$(o).find("#divId").val()!=undefined?$(o).find("#divId").val():"";
if(t===""){t="true"
}var R="";
if(t!=true&&ac===""&&O===""&&(E===""||V==="")){R='<div class="thumb-container withoutHamModule">'
}else{R='<div class="thumb-container">'
}if(w==="false"){if(aa==="fa fa-youtube"){u=true;
var C='<div class="thumb-img alt-background" id="'+x+'" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>";
if(!(y==="false")){var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/play_icon.png" alt="Video Play Icon" /></a>'
}else{var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/dark-play-icon.png" alt="Video Play Icon" /></a>'
}}else{if(aa==="fa fa-video-camera"){var C='<div class="thumb-img alt-background" id="'+x+'" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>";
if(!(y==="false")){var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/play_icon.png" alt="Video Play Icon" /></a>'
}else{var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/dark-play-icon.png" alt="Video Play Icon" /></a>'
}}else{var C='<div class="thumb-img alt-background" id="'+x+'" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>";
if(W=="true"){if(!(I==="false")){C+='<a onclick="return ImageLargeView(this);" largeimagename='+Z+" altText="+M+' href="javascript:void(0)"><i class="fa fa-search-plus img-largeView" aria-hidden="true"></i></a>'
}else{C+='<a onclick="return ImageLargeView(this);" largeimagename='+Z+" altText="+M+' href="javascript:void(0)"><i class="fa fa-search-plus img-largeView dark" aria-hidden="true"></i></a>'
}}}}C+="</div>"
}else{var C='<div class="thumb-img alt-background" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>";
if(B!=null&&(B!="")){if(!(y==="false")){C+=" <a asset-id="+B+' data-element-type="video" href="javascript:void(0);" onclick="return showVideo('+B+');"><img src="/content/dam/digitalmarketing/vmware/global-icons/play_icon.png" alt="Video Play Icon" /></a>'
}else{C+=" <a "+G+" asset-id="+B+' data-element-type="video" href="javascript:void(0);" onclick="return showVideo('+B+');"><img src="/content/dam/digitalmarketing/vmware/global-icons/dark-play-icon.png" alt="Video Play Icon" /></a>'
}}C+="</div>"
}var s='<div class="thumb-details">';
if(w!="false"&&B!=""&&P!=""){s+='<span class="timestamp">'+P+"</span>"
}s+='<div class="col-xs-1 col-md-1 col-sm-1">';
if(aa==="fa support_ico"){aa="fa support_ico"
}else{aa=aa
}s+='<i class="'+aa+'"></i></div>';
s+='<div class="col-xs-10 col-md-10 col-sm-10">';
if(!(t==="true")){var F="no"
}else{var F=""
}s+='<div class="detail-content '+F+'clamp">';
if(E!=""&&V!=""){s+='<h3 class="'+F+'clampingDetail"><span>'+E+"</span></h3>";
s+=r
}else{if(E!=""){s+='<h3 class="'+F+'clampingDetail"><p>'+E+"</p>"
}else{s+=r
}}s+="</div>";
if(ac!=""&&D!=""||O!=""){s+='<div class="cta_module">';
if(ac!=""&&D!=""){if(w!="false"&&B!=""){s+=' <a class | hcontentCarddisplay | identifier_name | |
clientlib-promoengine.js | var d=$('[name="resolvedPath"]').val();
var h=$('[name="pagePath"]').attr("content");
var g=[];
$("body").find(".hcontentcard.parbase").each(function(){var j=$(this).find("input#promotionalContent").val();
var l=$(this).find("input#promoposition").val();
var k=$(this).find("input#IsMbox").val();
if(j==="true"&&l!=""&&l!=undefined){g.push(l)
}});
var c=$('[name="localeVal"]').val();
if(c==="null"||c===""){var i=d;
if(i.indexOf("/content/vmware/vmware-preview-sites")>-1){i=i.replace("/content/vmware/vmware-preview-sites/","");
c=i.split("/")[0]
}else{if(i.indexOf("/content/vmware/vmware-published-sites")>-1){i=i.replace("/content/vmware/vmware-published-sites/","");
c=i.split("/")[0]
}else{if(h.indexOf("/content/vmware/vmware-preview-sites")>-1){i=h.replace("/content/vmware/vmware-preview-sites/","");
redirect_locale=i.split("/")[0]
}else{if(h.indexOf("/content/vmware/vmware-published-sites")>-1){i=h.replace("/content/vmware/vmware-published-sites/","");
redirect_locale=i.split("/")[0]
}}c=d.split("/")[1];
d=h.replace(redirect_locale,c)
}}}if(d.indexOf("/content/vmware/vmware-preview-sites")>-1){d=d.replace("/content/vmware/vmware-preview-sites","/content/vmware/vmware-published-sites")
}if(g.length>0){$.ajax({url:"/bin/vmware/promotionalcontent",type:"Get",async:true,data:{path:d,promopositionArray:JSON.stringify(g),currentDate:a(),promoPath:b,position:e,preview:f,locale:c}}).done(function(j){if(j.PromoJSon!=undefined){var k=j.PromoJSon;
$("body").find(".hcontentcard.parbase").each(function(){var o=$(this);
o.find(".thumb-container").removeAttr("style");
var n=$(this).find("input#promotionalContent").val();
var l=$(this).find("input#promoposition").val();
var m=$(this).find("input#templateName").val();
if(n==="true"&&promoposition!=""&&promoposition!=undefined){$.each(k,function(A,S){var q=S.isValidPromo;
var J=S.promoPosition!=undefined?S.promoPosition:"";
if(q&&J===l){var t=S.hamBurgerMenu!=undefined?S.hamBurgerMenu:"";
var ac=S.ctaPath!=undefined?S.ctaPath:"";
var D=S.ctaLabel!=undefined?S.ctaLabel:"";
var L=S.ctaLinkTitle!=undefined?S.ctaLinkTitle:"";
var O=S.date!=undefined?S.date:"";
var E=S.promoTitle!=undefined?S.promoTitle:"";
var V=S.promoContent!=undefined?S.promoContent:"";
var w=S.bcvtrue!=undefined?S.bcvtrue:"";
var aa=S.iconval!=undefined?S.iconval:"";
var u=false;
var M=S.altText!=undefined?S.altText:"";
var y=S.playicon?S.playicon:"";
var U=S.windowSelection!=undefined?S.windowSelection:"";
var I=S.zoomIcon!=undefined?S.zoomIcon:"";
var Z=S.largeImageLink!=undefined?S.largeImageLink:"";
var ab=S.imagePath!=undefined?S.imagePath:"";
var B=S.bcvid!=undefined?S.bcvid:"";
var W=S.expandImageCheckbox!=undefined?S.expandImageCheckbox:"";
var P=S.bcduration!=undefined?S.bcduration:"";
var ad=S.twitter!=undefined?S.twitter:"";
var N=S.linkedin!=undefined?S.linkedin:"";
var T=S.googleplus!=undefined?S.googleplus:"";
var Q=S.facebook!=undefined?S.facebook:"";
var G=S.props!=undefined?S.props:"";
var r=S.updatedbody!=undefined?S.updatedbody:"";
var z=S.description!=undefined?S.description:"";
var X=$(o).find(".section-custom").attr("id")!=undefined?$(o).find(".section-custom").attr("id"):"";
var x=$(o).find("#divId").val()!=undefined?$(o).find("#divId").val():"";
if(t===""){t="true"
}var R="";
if(t!=true&&ac===""&&O===""&&(E===""||V==="")){R='<div class="thumb-container withoutHamModule">'
}else{R='<div class="thumb-container">'
}if(w==="false"){if(aa==="fa fa-youtube"){u=true;
var C='<div class="thumb-img alt-background" id="'+x+'" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>";
if(!(y==="false")){var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/play_icon.png" alt="Video Play Icon" /></a>'
}else{var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/dark-play-icon.png" alt="Video Play Icon" /></a>'
}}else{if(aa==="fa fa-video-camera"){var C='<div class="thumb-img alt-background" id="'+x+'" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>";
if(!(y==="false")){var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/play_icon.png" alt="Video Play Icon" /></a>'
}else{var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/dark-play-icon.png" alt="Video Play Icon" /></a>'
}}else{var C='<div class="thumb-img alt-background" id="'+x+'" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>";
if(W=="true"){if(!(I==="false")){C+='<a onclick="return ImageLargeView(this);" largeimagename='+Z+" altText="+M+' href="javascript:void(0)"><i class="fa fa-search-plus img-largeView" aria-hidden="true"></i></a>'
}else{C+='<a onclick="return ImageLargeView(this);" largeimagename='+Z+" altText="+M+' href="javascript:void(0)"><i class="fa fa-search-plus img-largeView dark" aria-hidden="true"></i></a>'
}}}}C+="</div>"
}else{var C='<div class="thumb-img alt-background" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>";
if(B!=null&&(B!="")){if(!(y==="false")){C+=" <a asset-id="+B+' data-element-type="video" href="javascript:void(0);" onclick="return showVideo('+B+');"><img src="/content/dam/digitalmarketing/vmware/global-icons/play_icon.png" alt="Video Play Icon" /></a>'
}else{C+=" <a "+G+" asset-id="+B+' data-element-type="video" href="javascript:void(0);" onclick="return showVideo('+B+');"><img src="/content/dam/digitalmarketing/vmware/global-icons/dark-play-icon.png" alt="Video Play Icon" /></a>'
}}C+="</div>"
}var s='<div class="thumb-details">';
if(w!="false"&&B!=""&&P!=""){s+='<span class="timestamp">'+P+"</span>"
}s+='<div class="col-xs-1 col-md-1 col-sm-1">';
if(aa==="fa support_ico"){aa="fa support_ico"
}else{aa=aa
}s+='<i class="'+aa+'"></i></div>';
s+='<div class="col-xs-10 col-md-10 col-sm-10">';
if(!(t==="true")){var F="no"
}else{var F=""
}s+='<div class="detail-content '+F+'clamp">';
if(E!=""&&V!=""){s+='<h3 class="'+F+'clampingDetail"><span>'+E+"</span></h3>";
s+=r
}else{if(E!=""){s+='<h3 class="'+F+'clampingDetail"><p>'+E+"</p>"
}else{s+=r
}}s+="</div>";
if(ac!=""&&D!=""||O!=""){s+='<div class="cta_module">';
if(ac!=""&&D!=""){if(w!="false"&&B!=""){s+=' <a class="learn_more" asset-id='+B+' | {var l=new Date();
var k=l.getTimezoneOffset();
k=(k/60)*-1;
var j=l.getTime();
if(k!==0){return(j+(3600000*k))
}return j
} | identifier_body | |
image_pyramid.py | _image,width,height):
"""Resizing the image
@param input_image: The source image.
@param width:Width of new image
@param height:Height of new image
@return The resized image
"""
#Resizing the image
output_image=cv2.resize(input_image,None,fx=width,fy=height)
return output_image
def get_dark_channel(self,img, *, size):
    """Get dark channel for an image.
    @param img: The source image.
    @param size: Patch size.
    @return The dark channel of the image.
    """
    # Per-pixel minimum over the colour channels, followed by a grayscale
    # erosion (patch-wise minimum) with a rectangular structuring element.
    channel_min = np.amin(img, axis=2)
    window = cv2.getStructuringElement(cv2.MORPH_RECT, (size // 2, size // 2))
    return cv2.erode(channel_min, window)
def get_atmospheric_light(self,img, *, size, percent):
|
def get_transmission(self,img, atmosphere, *, size, omega, radius, epsilon):
    """Estimate transmission map of an image.
    @param img: The source image.
    @param atmosphere: The atmospheric light for the image.
    @param omega: Factor to preserve minor amounts of haze [1].
    @param radius: (default: 40) Radius for the guided filter [2].
    @param epsilon: (default: 0.001) Epsilon for the guided filter [2].
    @return The transmission map for the source image.
    """
    # Normalise by the atmospheric light, then estimate transmission from
    # the dark channel of the normalised image: t = 1 - omega * dark.
    normalised = np.float64(img) / np.float64(atmosphere)
    dark = self.get_dark_channel(normalised, size=size)
    estimate = (1 - omega * dark).astype(np.float32)
    # Edge-preserving smoothing of the raw estimate with a guided filter.
    return cv2.ximgproc.guidedFilter(img, estimate, radius, epsilon)
def get_scene_radiance(self, img,*,size=15,omega=0.95,trans_lb=0.1,percent=0.1,radius=40,epsilon=0.001):
    """Get recovered scene radiance for a hazy image.
    @param img: The source image to be dehazed.
    @param omega: (default: 0.95) Factor to preserve minor amounts of haze [1].
    @param trans_lb: (default: 0.1) Lower bound for transmission [1].
    @param size: (default: 15) Patch size for filtering etc [1].
    @param percent: (default: 0.1) Percentage of pixels chosen to compute atmospheric light [1].
    @param radius: (default: 40) Radius for the guided filter [2].
    @param epsilon: (default: 0.001) Epsilon for the guided filter [2].
    @return The final dehazed image (uint8, same shape as the input).
    """
    # Number of intensity levels in an 8-bit image.  BUG FIX: the original
    # used L = 356, so clip(0, L - 1) allowed values up to 355; casting those
    # to uint8 wraps modulo 256 and produces corrupted bright pixels.
    L = 256
    # Atmospheric scattering model: I = J*t + A*(1-t)  =>  J = (I - A)/t + A
    atmosphere = self.get_atmospheric_light(img, size=size, percent=percent)
    trans = self.get_transmission(img, atmosphere, size=size, omega=omega, radius=radius, epsilon=epsilon)
    # Lower-bound the transmission to avoid dividing by values near zero.
    clamped = np.clip(trans, trans_lb, omega)[:, :, None]
    img = np.float64(img)
    return np.uint8(((img - atmosphere) / clamped + atmosphere).clip(0, L - 1))
def process_imgdir(self,imgdir):
    """Dehaze every RGB image in `imgdir`/inputs and write results to
    `imgdir`/results.
    @param imgdir: The source image directory.
    @return Path of the last image written to the results directory, or
            None when no image was processed.
    """
    resultdir = os.path.join(imgdir, 'results')
    inputdir = os.path.join(imgdir, 'inputs')
    # Recreate the output directory.  ignore_errors avoids a crash on the
    # first run, when `results` does not exist yet.
    shutil.rmtree(resultdir, ignore_errors=True)
    os.mkdir(resultdir)
    # Track the last written path; the original referenced `basename` after
    # the loop, which raised NameError when the input directory was empty.
    result_path = None
    for fullname in os.listdir(inputdir):
        filepath = os.path.join(inputdir, fullname)
        if not os.path.isfile(filepath):
            continue
        basename = os.path.basename(filepath)
        image = cv2.imread(filepath, cv2.IMREAD_COLOR)
        # imread returns None for unreadable/non-image files.
        if image is None:
            sys.stderr.write('Skipping %s, not an image' % basename)
            continue
        if len(image.shape) == 3 and image.shape[2] == 3:
            print('Processing %s ...' % basename)
        else:
            sys.stderr.write('Skipping %s, not RGB' % basename)
            continue
        # Extract haze from the scene and then save the image
        dehazed = self.get_scene_radiance(image)
        result_path = os.path.join(resultdir, basename)
        cv2.imwrite(result_path, dehazed)
    return result_path
def image_enhancement(self,img,file_name):
"""Main function to call all the functions
@param img: Input image
@param file_name: The output file name
@return All the haze free images.
"""
#Creating output directory if it doesnt exist
dirname = 'output'
dir_path = os.path.dirname(os.path.realpath(__file__))
if(os.path.isdir(os.path.join(dir_path, dirname))):
if(os.path.exists(os.path.join(dir_path, dirname))):
pass
else:
os.mkdir(os.path.join(dir_path, dirname))
os.mkdir(os.path.join(dir_path, dirname,"results"))
os.mkdir(os.path.join(dir_path, dirname,"inputs"))
#Extracting edges using Canny's Edge Detection
edges = cv2.Canny(img,80,255)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','edges.png'),edges)
kernel = (3,3)
#Applying image pyramid technique
#Applying Gaussian blur filter over the image
gaussian_blurred_image =self.gaussian_blurring(img,kernel,0)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','gaussian_blurred_image.png'),gaussian_blurred_image)
plt.subplot(121),
plt.xticks([]), plt.yticks([])
plt.subplot(122),
plt.xticks([]), plt.yticks([])
#Downsizing the image to 1/4th of its original size
coarse_image =self.sampling(gaussian_blurred_image,0.25,0.25)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','coarse_image.png'),coarse_image)
#Upsampling the image to its original size
up_sampling=self.sampling(coarse_image,4,4)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','up_sampling.png'),up_sampling)
#Applying Gaussian Blur filtering
gaus=self.gaussian_blurring(up_sampling,kernel,0)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','gaus2.png'),gaus)
#Resizing the image for image subtraction
gaussian_blurred_image=cv2.resize(img,(gaus.shape[1],gaus.shape[0]))
#Convert into grayscale
gaus_gray=cv2.cvtColor(gaus,cv2.COLOR_BGR2GRAY)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','gausgray.png'),gaus_gray)
#Converting to grayscale
dst_gray=cv2.cvtColor(gaussian_blurred_image,cv2.COLOR_BGR2GRAY)
(score, diff) = compare_ssim(gaus_gray, dst_gray, full=True)
diff = (diff * 255).astype("uint8")
#Image Subtraction
detail_image = cv2.subtract(gaus,gaussian_blurred_image)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','detailed.png'),detail_image)
print(detail_image.shape)
output_path=self.process_imgdir(os.path.join(dir_path, dirname))
dehazed_image=cv2.imread(output_path)
# dehazed_image =self.sampling(dehazed_image,4,4)
output_path="\\".join(output_path.split("\\")[:-1])
print(dehazed_image.shape)
cv2.imwrite(os.path.join(output_path,'dehazed_image.png'),dehazed_image)
#Adding two images
dst = cv2.addWeighted(detail_image,1,dehazed_image,1,0)
kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
dst = cv2.filter2D(dst, -1, kernel)
#Converting images to lightness,chroma ,hue for increasing the brightness
lab= cv2.cvtColor(dst, cv2.COLOR_BGR2LAB)
l, a, b = cv2.split(lab)
#Applying CLAHE Algorithm for contrast amplification which is limited and to reduce the problem of noise amplification
clahe = cv2.createCLAHE(clipLimit=3.0 | """Estimate atmospheric light for an image.
@param img: the source image.
@param size: Patch size for calculating the dark channel.
@param percent: Percentage of brightest pixels in the dark channel
considered for the estimation.
@return The estimated atmospheric light.
"""
#Get the atmospheric light factor from the image
m, n, _ = img.shape
flat_img = img.reshape(m * n, 3)
flat_dark = self.get_dark_channel(img, size=size).ravel()
count = math.ceil(m * n * percent / 100)
indices = np.argpartition(flat_dark, -count)[:-count]
return np.amax(np.take(flat_img, indices, axis=0), axis=0) | identifier_body |
image_pyramid.py | ,width,height):
"""Resizing the image
@param input_image: The source image.
@param width:Width of new image
@param height:Height of new image
@return The resized image
"""
#Resizing the image
output_image=cv2.resize(input_image,None,fx=width,fy=height)
return output_image
def get_dark_channel(self,img, *, size):
    """Get dark channel for an image.
    @param img: The source image.
    @param size: Patch size.
    @return The dark channel of the image.
    """
    # Per-pixel minimum over the colour channels, followed by a grayscale
    # erosion (patch-wise minimum) with a rectangular structuring element.
    channel_min = np.amin(img, axis=2)
    window = cv2.getStructuringElement(cv2.MORPH_RECT, (size // 2, size // 2))
    return cv2.erode(channel_min, window)
def get_atmospheric_light(self,img, *, size, percent):
"""Estimate atmospheric light for an image.
@param img: the source image.
@param size: Patch size for calculating the dark channel.
@param percent: Percentage of brightest pixels in the dark channel
considered for the estimation.
@return The estimated atmospheric light.
"""
#Get the atmospheric light factor from the image
m, n, _ = img.shape |
flat_img = img.reshape(m * n, 3)
flat_dark = self.get_dark_channel(img, size=size).ravel()
count = math.ceil(m * n * percent / 100)
indices = np.argpartition(flat_dark, -count)[:-count]
return np.amax(np.take(flat_img, indices, axis=0), axis=0)
def get_transmission(self,img, atmosphere, *, size, omega, radius, epsilon):
    """Estimate transmission map of an image.
    @param img: The source image.
    @param atmosphere: The atmospheric light for the image.
    @param omega: Factor to preserve minor amounts of haze [1].
    @param radius: (default: 40) Radius for the guided filter [2].
    @param epsilon: (default: 0.001) Epsilon for the guided filter [2].
    @return The transmission map for the source image.
    """
    # Normalise by the atmospheric light, then estimate transmission from
    # the dark channel of the normalised image: t = 1 - omega * dark.
    normalised = np.float64(img) / np.float64(atmosphere)
    dark = self.get_dark_channel(normalised, size=size)
    estimate = (1 - omega * dark).astype(np.float32)
    # Edge-preserving smoothing of the raw estimate with a guided filter.
    return cv2.ximgproc.guidedFilter(img, estimate, radius, epsilon)
def get_scene_radiance(self, img,*,size=15,omega=0.95,trans_lb=0.1,percent=0.1,radius=40,epsilon=0.001):
    """Get recovered scene radiance for a hazy image.
    @param img: The source image to be dehazed.
    @param omega: (default: 0.95) Factor to preserve minor amounts of haze [1].
    @param trans_lb: (default: 0.1) Lower bound for transmission [1].
    @param size: (default: 15) Patch size for filtering etc [1].
    @param percent: (default: 0.1) Percentage of pixels chosen to compute atmospheric light [1].
    @param radius: (default: 40) Radius for the guided filter [2].
    @param epsilon: (default: 0.001) Epsilon for the guided filter [2].
    @return The final dehazed image (uint8, same shape as the input).
    """
    # Number of intensity levels in an 8-bit image.  BUG FIX: the original
    # used L = 356, so clip(0, L - 1) allowed values up to 355; casting those
    # to uint8 wraps modulo 256 and produces corrupted bright pixels.
    L = 256
    # Atmospheric scattering model: I = J*t + A*(1-t)  =>  J = (I - A)/t + A
    atmosphere = self.get_atmospheric_light(img, size=size, percent=percent)
    trans = self.get_transmission(img, atmosphere, size=size, omega=omega, radius=radius, epsilon=epsilon)
    # Lower-bound the transmission to avoid dividing by values near zero.
    clamped = np.clip(trans, trans_lb, omega)[:, :, None]
    img = np.float64(img)
    return np.uint8(((img - atmosphere) / clamped + atmosphere).clip(0, L - 1))
def process_imgdir(self,imgdir):
    """Dehaze every RGB image in `imgdir`/inputs and write results to
    `imgdir`/results.
    @param imgdir: The source image directory.
    @return Path of the last image written to the results directory, or
            None when no image was processed.
    """
    resultdir = os.path.join(imgdir, 'results')
    inputdir = os.path.join(imgdir, 'inputs')
    # Recreate the output directory.  ignore_errors avoids a crash on the
    # first run, when `results` does not exist yet.
    shutil.rmtree(resultdir, ignore_errors=True)
    os.mkdir(resultdir)
    # Track the last written path; the original referenced `basename` after
    # the loop, which raised NameError when the input directory was empty.
    result_path = None
    for fullname in os.listdir(inputdir):
        filepath = os.path.join(inputdir, fullname)
        if not os.path.isfile(filepath):
            continue
        basename = os.path.basename(filepath)
        image = cv2.imread(filepath, cv2.IMREAD_COLOR)
        # imread returns None for unreadable/non-image files.
        if image is None:
            sys.stderr.write('Skipping %s, not an image' % basename)
            continue
        if len(image.shape) == 3 and image.shape[2] == 3:
            print('Processing %s ...' % basename)
        else:
            sys.stderr.write('Skipping %s, not RGB' % basename)
            continue
        # Extract haze from the scene and then save the image
        dehazed = self.get_scene_radiance(image)
        result_path = os.path.join(resultdir, basename)
        cv2.imwrite(result_path, dehazed)
    return result_path
def image_enhancement(self,img,file_name):
"""Main function to call all the functions
@param img: Input image
@param file_name: The output file name
@return All the haze free images.
"""
#Creating output directory if it doesnt exist
dirname = 'output'
dir_path = os.path.dirname(os.path.realpath(__file__))
if(os.path.isdir(os.path.join(dir_path, dirname))):
if(os.path.exists(os.path.join(dir_path, dirname))):
pass
else:
os.mkdir(os.path.join(dir_path, dirname))
os.mkdir(os.path.join(dir_path, dirname,"results"))
os.mkdir(os.path.join(dir_path, dirname,"inputs"))
#Extracting edges using Canny's Edge Detection
edges = cv2.Canny(img,80,255)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','edges.png'),edges)
kernel = (3,3)
#Applying image pyramid technique
#Applying Gaussian blur filter over the image
gaussian_blurred_image =self.gaussian_blurring(img,kernel,0)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','gaussian_blurred_image.png'),gaussian_blurred_image)
plt.subplot(121),
plt.xticks([]), plt.yticks([])
plt.subplot(122),
plt.xticks([]), plt.yticks([])
#Downsizing the image to 1/4th of its original size
coarse_image =self.sampling(gaussian_blurred_image,0.25,0.25)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','coarse_image.png'),coarse_image)
#Upsampling the image to its original size
up_sampling=self.sampling(coarse_image,4,4)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','up_sampling.png'),up_sampling)
#Applying Gaussian Blur filtering
gaus=self.gaussian_blurring(up_sampling,kernel,0)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','gaus2.png'),gaus)
#Resizing the image for image subtraction
gaussian_blurred_image=cv2.resize(img,(gaus.shape[1],gaus.shape[0]))
#Convert into grayscale
gaus_gray=cv2.cvtColor(gaus,cv2.COLOR_BGR2GRAY)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','gausgray.png'),gaus_gray)
#Converting to grayscale
dst_gray=cv2.cvtColor(gaussian_blurred_image,cv2.COLOR_BGR2GRAY)
(score, diff) = compare_ssim(gaus_gray, dst_gray, full=True)
diff = (diff * 255).astype("uint8")
#Image Subtraction
detail_image = cv2.subtract(gaus,gaussian_blurred_image)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','detailed.png'),detail_image)
print(detail_image.shape)
output_path=self.process_imgdir(os.path.join(dir_path, dirname))
dehazed_image=cv2.imread(output_path)
# dehazed_image =self.sampling(dehazed_image,4,4)
output_path="\\".join(output_path.split("\\")[:-1])
print(dehazed_image.shape)
cv2.imwrite(os.path.join(output_path,'dehazed_image.png'),dehazed_image)
#Adding two images
dst = cv2.addWeighted(detail_image,1,dehazed_image,1,0)
kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
dst = cv2.filter2D(dst, -1, kernel)
#Converting images to lightness,chroma ,hue for increasing the brightness
lab= cv2.cvtColor(dst, cv2.COLOR_BGR2LAB)
l, a, b = cv2.split(lab)
#Applying CLAHE Algorithm for contrast amplification which is limited and to reduce the problem of noise amplification
clahe = cv2.createCLAHE(clipLimit=3.0, tile | random_line_split | |
image_pyramid.py | _image,width,height):
"""Resizing the image
@param input_image: The source image.
@param width:Width of new image
@param height:Height of new image
@return The resized image
"""
#Resizing the image
output_image=cv2.resize(input_image,None,fx=width,fy=height)
return output_image
def get_dark_channel(self,img, *, size):
    """Get dark channel for an image.
    @param img: The source image.
    @param size: Patch size.
    @return The dark channel of the image.
    """
    # Per-pixel minimum over the colour channels, followed by a grayscale
    # erosion (patch-wise minimum) with a rectangular structuring element.
    channel_min = np.amin(img, axis=2)
    window = cv2.getStructuringElement(cv2.MORPH_RECT, (size // 2, size // 2))
    return cv2.erode(channel_min, window)
def get_atmospheric_light(self,img, *, size, percent):
    """Estimate atmospheric light for an image.
    @param img: the source image.
    @param size: Patch size for calculating the dark channel.
    @param percent: Percentage of brightest pixels in the dark channel
           considered for the estimation.
    @return The estimated atmospheric light (per-channel maximum over the
            selected brightest pixels).
    """
    m, n, _ = img.shape
    flat_img = img.reshape(m * n, 3)
    flat_dark = self.get_dark_channel(img, size=size).ravel()
    count = math.ceil(m * n * percent / 100)
    # BUG FIX: argpartition(flat_dark, -count) places the `count` LARGEST
    # dark-channel values at the end of the index array, so the brightest
    # pixels are the slice [-count:].  The original [:-count] selected the
    # complement (the dimmest pixels), contradicting the documented intent.
    indices = np.argpartition(flat_dark, -count)[-count:]
    return np.amax(np.take(flat_img, indices, axis=0), axis=0)
def | (self,img, atmosphere, *, size, omega, radius, epsilon):
"""Estimate transmission map of an image.
@param img: The source image.
@param atmosphere: The atmospheric light for the image.
@param omega: Factor to preserve minor amounts of haze [1].
@param radius: (default: 40) Radius for the guided filter [2].
@param epsilon: (default: 0.001) Epsilon for the guided filter [2].
@return The transmission map for the source image.
"""
#Get transmission map from the image
division = np.float64(img) / np.float64(atmosphere)
raw = (1 - omega * self.get_dark_channel(division, size=size)).astype(np.float32)
return cv2.ximgproc.guidedFilter(img, raw, radius, epsilon)
def get_scene_radiance(self, img,*,size=15,omega=0.95,trans_lb=0.1,percent=0.1,radius=40,epsilon=0.001):
    """Get recovered scene radiance for a hazy image.
    @param img: The source image to be dehazed.
    @param omega: (default: 0.95) Factor to preserve minor amounts of haze [1].
    @param trans_lb: (default: 0.1) Lower bound for transmission [1].
    @param size: (default: 15) Patch size for filtering etc [1].
    @param percent: (default: 0.1) Percentage of pixels chosen to compute atmospheric light [1].
    @param radius: (default: 40) Radius for the guided filter [2].
    @param epsilon: (default: 0.001) Epsilon for the guided filter [2].
    @return The final dehazed image (uint8, same shape as the input).
    """
    # Number of intensity levels in an 8-bit image.  BUG FIX: the original
    # used L = 356, so clip(0, L - 1) allowed values up to 355; casting those
    # to uint8 wraps modulo 256 and produces corrupted bright pixels.
    L = 256
    # Atmospheric scattering model: I = J*t + A*(1-t)  =>  J = (I - A)/t + A
    atmosphere = self.get_atmospheric_light(img, size=size, percent=percent)
    trans = self.get_transmission(img, atmosphere, size=size, omega=omega, radius=radius, epsilon=epsilon)
    # Lower-bound the transmission to avoid dividing by values near zero.
    clamped = np.clip(trans, trans_lb, omega)[:, :, None]
    img = np.float64(img)
    return np.uint8(((img - atmosphere) / clamped + atmosphere).clip(0, L - 1))
def process_imgdir(self,imgdir):
    """Dehaze every RGB image in `imgdir`/inputs and write results to
    `imgdir`/results.
    @param imgdir: The source image directory.
    @return Path of the last image written to the results directory, or
            None when no image was processed.
    """
    resultdir = os.path.join(imgdir, 'results')
    inputdir = os.path.join(imgdir, 'inputs')
    # Recreate the output directory.  ignore_errors avoids a crash on the
    # first run, when `results` does not exist yet.
    shutil.rmtree(resultdir, ignore_errors=True)
    os.mkdir(resultdir)
    # Track the last written path; the original referenced `basename` after
    # the loop, which raised NameError when the input directory was empty.
    result_path = None
    for fullname in os.listdir(inputdir):
        filepath = os.path.join(inputdir, fullname)
        if not os.path.isfile(filepath):
            continue
        basename = os.path.basename(filepath)
        image = cv2.imread(filepath, cv2.IMREAD_COLOR)
        # imread returns None for unreadable/non-image files.
        if image is None:
            sys.stderr.write('Skipping %s, not an image' % basename)
            continue
        if len(image.shape) == 3 and image.shape[2] == 3:
            print('Processing %s ...' % basename)
        else:
            sys.stderr.write('Skipping %s, not RGB' % basename)
            continue
        # Extract haze from the scene and then save the image
        dehazed = self.get_scene_radiance(image)
        result_path = os.path.join(resultdir, basename)
        cv2.imwrite(result_path, dehazed)
    return result_path
def image_enhancement(self,img,file_name):
"""Main function to call all the functions
@param img: Input image
@param file_name: The output file name
@return All the haze free images.
"""
#Creating output directory if it doesnt exist
dirname = 'output'
dir_path = os.path.dirname(os.path.realpath(__file__))
if(os.path.isdir(os.path.join(dir_path, dirname))):
if(os.path.exists(os.path.join(dir_path, dirname))):
pass
else:
os.mkdir(os.path.join(dir_path, dirname))
os.mkdir(os.path.join(dir_path, dirname,"results"))
os.mkdir(os.path.join(dir_path, dirname,"inputs"))
#Extracting edges using Canny's Edge Detection
edges = cv2.Canny(img,80,255)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','edges.png'),edges)
kernel = (3,3)
#Applying image pyramid technique
#Applying Gaussian blur filter over the image
gaussian_blurred_image =self.gaussian_blurring(img,kernel,0)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','gaussian_blurred_image.png'),gaussian_blurred_image)
plt.subplot(121),
plt.xticks([]), plt.yticks([])
plt.subplot(122),
plt.xticks([]), plt.yticks([])
#Downsizing the image to 1/4th of its original size
coarse_image =self.sampling(gaussian_blurred_image,0.25,0.25)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','coarse_image.png'),coarse_image)
#Upsampling the image to its original size
up_sampling=self.sampling(coarse_image,4,4)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','up_sampling.png'),up_sampling)
#Applying Gaussian Blur filtering
gaus=self.gaussian_blurring(up_sampling,kernel,0)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','gaus2.png'),gaus)
#Resizing the image for image subtraction
gaussian_blurred_image=cv2.resize(img,(gaus.shape[1],gaus.shape[0]))
#Convert into grayscale
gaus_gray=cv2.cvtColor(gaus,cv2.COLOR_BGR2GRAY)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','gausgray.png'),gaus_gray)
#Converting to grayscale
dst_gray=cv2.cvtColor(gaussian_blurred_image,cv2.COLOR_BGR2GRAY)
(score, diff) = compare_ssim(gaus_gray, dst_gray, full=True)
diff = (diff * 255).astype("uint8")
#Image Subtraction
detail_image = cv2.subtract(gaus,gaussian_blurred_image)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','detailed.png'),detail_image)
print(detail_image.shape)
output_path=self.process_imgdir(os.path.join(dir_path, dirname))
dehazed_image=cv2.imread(output_path)
# dehazed_image =self.sampling(dehazed_image,4,4)
output_path="\\".join(output_path.split("\\")[:-1])
print(dehazed_image.shape)
cv2.imwrite(os.path.join(output_path,'dehazed_image.png'),dehazed_image)
#Adding two images
dst = cv2.addWeighted(detail_image,1,dehazed_image,1,0)
kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
dst = cv2.filter2D(dst, -1, kernel)
#Converting images to lightness,chroma ,hue for increasing the brightness
lab= cv2.cvtColor(dst, cv2.COLOR_BGR2LAB)
l, a, b = cv2.split(lab)
#Applying CLAHE Algorithm for contrast amplification which is limited and to reduce the problem of noise amplification
clahe = cv2.createCLAHE(clipLimit=3.0 | get_transmission | identifier_name |
image_pyramid.py | _image,width,height):
"""Resizing the image
@param input_image: The source image.
@param width:Width of new image
@param height:Height of new image
@return The resized image
"""
#Resizing the image
output_image=cv2.resize(input_image,None,fx=width,fy=height)
return output_image
def get_dark_channel(self,img, *, size):
    """Get dark channel for an image.
    @param img: The source image.
    @param size: Patch size.
    @return The dark channel of the image.
    """
    # Per-pixel minimum over the colour channels, followed by a grayscale
    # erosion (patch-wise minimum) with a rectangular structuring element.
    channel_min = np.amin(img, axis=2)
    window = cv2.getStructuringElement(cv2.MORPH_RECT, (size // 2, size // 2))
    return cv2.erode(channel_min, window)
def get_atmospheric_light(self,img, *, size, percent):
    """Estimate atmospheric light for an image.
    @param img: the source image.
    @param size: Patch size for calculating the dark channel.
    @param percent: Percentage of brightest pixels in the dark channel
           considered for the estimation.
    @return The estimated atmospheric light (per-channel maximum over the
            selected brightest pixels).
    """
    m, n, _ = img.shape
    flat_img = img.reshape(m * n, 3)
    flat_dark = self.get_dark_channel(img, size=size).ravel()
    count = math.ceil(m * n * percent / 100)
    # BUG FIX: argpartition(flat_dark, -count) places the `count` LARGEST
    # dark-channel values at the end of the index array, so the brightest
    # pixels are the slice [-count:].  The original [:-count] selected the
    # complement (the dimmest pixels), contradicting the documented intent.
    indices = np.argpartition(flat_dark, -count)[-count:]
    return np.amax(np.take(flat_img, indices, axis=0), axis=0)
def get_transmission(self,img, atmosphere, *, size, omega, radius, epsilon):
    """Estimate transmission map of an image.
    @param img: The source image.
    @param atmosphere: The atmospheric light for the image.
    @param omega: Factor to preserve minor amounts of haze [1].
    @param radius: (default: 40) Radius for the guided filter [2].
    @param epsilon: (default: 0.001) Epsilon for the guided filter [2].
    @return The transmission map for the source image.
    """
    # Normalise by the atmospheric light, then estimate transmission from
    # the dark channel of the normalised image: t = 1 - omega * dark.
    normalised = np.float64(img) / np.float64(atmosphere)
    dark = self.get_dark_channel(normalised, size=size)
    estimate = (1 - omega * dark).astype(np.float32)
    # Edge-preserving smoothing of the raw estimate with a guided filter.
    return cv2.ximgproc.guidedFilter(img, estimate, radius, epsilon)
def get_scene_radiance(self, img,*,size=15,omega=0.95,trans_lb=0.1,percent=0.1,radius=40,epsilon=0.001):
    """Get recovered scene radiance for a hazy image.
    @param img: The source image to be dehazed.
    @param omega: (default: 0.95) Factor to preserve minor amounts of haze [1].
    @param trans_lb: (default: 0.1) Lower bound for transmission [1].
    @param size: (default: 15) Patch size for filtering etc [1].
    @param percent: (default: 0.1) Percentage of pixels chosen to compute atmospheric light [1].
    @param radius: (default: 40) Radius for the guided filter [2].
    @param epsilon: (default: 0.001) Epsilon for the guided filter [2].
    @return The final dehazed image (uint8, same shape as the input).
    """
    # Number of intensity levels in an 8-bit image.  BUG FIX: the original
    # used L = 356, so clip(0, L - 1) allowed values up to 355; casting those
    # to uint8 wraps modulo 256 and produces corrupted bright pixels.
    L = 256
    # Atmospheric scattering model: I = J*t + A*(1-t)  =>  J = (I - A)/t + A
    atmosphere = self.get_atmospheric_light(img, size=size, percent=percent)
    trans = self.get_transmission(img, atmosphere, size=size, omega=omega, radius=radius, epsilon=epsilon)
    # Lower-bound the transmission to avoid dividing by values near zero.
    clamped = np.clip(trans, trans_lb, omega)[:, :, None]
    img = np.float64(img)
    return np.uint8(((img - atmosphere) / clamped + atmosphere).clip(0, L - 1))
def process_imgdir(self,imgdir):
"""Get haze free images in the directory
@param imgdir: The source image directory.
@return All the haze free images.
"""
#Write images into resultdir
resultdir = os.path.join(imgdir, 'results')
#Read images from input dir
inputdir = os.path.join(imgdir, 'inputs')
shutil.rmtree(resultdir)
os.mkdir(resultdir)
#Read files from input images
for fullname in os.listdir(inputdir):
filepath = os.path.join(inputdir, fullname)
if os.path.isfile(filepath):
basename = os.path.basename(filepath)
image = cv2.imread(filepath, cv2.IMREAD_COLOR)
if len(image.shape) == 3 and image.shape[2] == 3:
|
else:
sys.stderr.write('Skipping %s, not RGB' % basename)
continue
#Extract haze from the scene and then save the image
dehazed = self.get_scene_radiance(image)
cv2.imwrite(os.path.join(resultdir, basename), dehazed)
return os.path.join(resultdir, basename)
def image_enhancement(self,img,file_name):
"""Main function to call all the functions
@param img: Input image
@param file_name: The output file name
@return All the haze free images.
"""
#Creating output directory if it doesnt exist
dirname = 'output'
dir_path = os.path.dirname(os.path.realpath(__file__))
if(os.path.isdir(os.path.join(dir_path, dirname))):
if(os.path.exists(os.path.join(dir_path, dirname))):
pass
else:
os.mkdir(os.path.join(dir_path, dirname))
os.mkdir(os.path.join(dir_path, dirname,"results"))
os.mkdir(os.path.join(dir_path, dirname,"inputs"))
#Extracting edges using Canny's Edge Detection
edges = cv2.Canny(img,80,255)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','edges.png'),edges)
kernel = (3,3)
#Applying image pyramid technique
#Applying Gaussian blur filter over the image
gaussian_blurred_image =self.gaussian_blurring(img,kernel,0)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','gaussian_blurred_image.png'),gaussian_blurred_image)
plt.subplot(121),
plt.xticks([]), plt.yticks([])
plt.subplot(122),
plt.xticks([]), plt.yticks([])
#Downsizing the image to 1/4th of its original size
coarse_image =self.sampling(gaussian_blurred_image,0.25,0.25)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','coarse_image.png'),coarse_image)
#Upsampling the image to its original size
up_sampling=self.sampling(coarse_image,4,4)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','up_sampling.png'),up_sampling)
#Applying Gaussian Blur filtering
gaus=self.gaussian_blurring(up_sampling,kernel,0)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','gaus2.png'),gaus)
#Resizing the image for image subtraction
gaussian_blurred_image=cv2.resize(img,(gaus.shape[1],gaus.shape[0]))
#Convert into grayscale
gaus_gray=cv2.cvtColor(gaus,cv2.COLOR_BGR2GRAY)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','gausgray.png'),gaus_gray)
#Converting to grayscale
dst_gray=cv2.cvtColor(gaussian_blurred_image,cv2.COLOR_BGR2GRAY)
(score, diff) = compare_ssim(gaus_gray, dst_gray, full=True)
diff = (diff * 255).astype("uint8")
#Image Subtraction
detail_image = cv2.subtract(gaus,gaussian_blurred_image)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','detailed.png'),detail_image)
print(detail_image.shape)
output_path=self.process_imgdir(os.path.join(dir_path, dirname))
dehazed_image=cv2.imread(output_path)
# dehazed_image =self.sampling(dehazed_image,4,4)
output_path="\\".join(output_path.split("\\")[:-1])
print(dehazed_image.shape)
cv2.imwrite(os.path.join(output_path,'dehazed_image.png'),dehazed_image)
#Adding two images
dst = cv2.addWeighted(detail_image,1,dehazed_image,1,0)
kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
dst = cv2.filter2D(dst, -1, kernel)
#Converting images to lightness,chroma ,hue for increasing the brightness
lab= cv2.cvtColor(dst, cv2.COLOR_BGR2LAB)
l, a, b = cv2.split(lab)
#Applying CLAHE Algorithm for contrast amplification which is limited and to reduce the problem of noise amplification
clahe = cv2.createCLAHE(clipLimit=3.0 | print('Processing %s ...' % basename) | conditional_block |
node.py | (i.e. immutable list) of `pge.Node` objects representing the
nodes that have control edges to this node.
"""
raise NotImplementedError("This method should be implemented by "
"subclasses.")
@property
def device(self):
    """
    Returns:
      TensorFlow device placement string describing where this node should be
      placed, or None to specify use of the default device.
    """
    # Value captured at construction time; not mutated by this class.
    return self._device
def to_node_def(self):
    """
    Returns:
      A copy of the contents of this node as a NodeDef proto. The returned
      proto will *not* change if this node is changed after the call, and
      vice versa.
    """
    # Abstract method: concrete subclasses (e.g. ImmutableNode) must override.
    raise NotImplementedError("This method should be implemented by "
                              "subclasses.")
def get_attr(self, key: str) -> Any:
    """
    Retrieve the value of an attribute by name.
    Args:
      key: Key under which the node's attribute is stored
    Returns:
      Current value of the attribute as an appropriate native Python type
      (NOT a `tf.AttrValue` protobuf) or None if no value was found.
    Raises:
      ValueError if the indicated key does not have an attribute associated
      with it.
    """
    # Abstract method: subclasses decide how attribute storage works.
    raise NotImplementedError("This method should be implemented by "
                              "subclasses.")
def get_attr_keys(self) -> Tuple[str]:
    """
    Returns:
      Tuple (immutable list) of the keys of all attributes currently present
      in the node
    """
    # Abstract method: subclasses decide how attribute storage works.
    raise NotImplementedError("This method should be implemented by "
                              "subclasses.")
class ImmutableNode(Node):
    """
    Wrapper for tf.NodeDef. Also maintains a pointer back to wrapper object for
    the original graph.
    """

    def __init__(self, g: 'graph.Graph', node_id: int, node_def: tf.NodeDef,
                 outputs_list: List[Tuple[tf.DType, tf.shape]]):
        """
        Args:
          g: pge.Graph object that represents the parent graph
          node_id: Unique (within parent graph) integer identifier for this node
          node_def: tf.NodeDef protobuf
          outputs_list: List of (type, shape) pairs that describe the outputs of
            this node
        """
        # Wrap each (dtype, shape) pair in a Tensor keyed by output position.
        wrapped_outputs = [tensor.Tensor(self, index, dtype, shape)
                           for index, (dtype, shape) in enumerate(outputs_list)]
        Node.__init__(self, g, node_id=node_id, name=node_def.name,
                      op_name=node_def.op, outputs=wrapped_outputs,
                      device=node_def.device)
        self._node_def = node_def

    @Node.inputs.getter
    def inputs(self) -> Tuple[tensor.Tensor]:
        # Recomputed on every call; the wrapped NodeDef is the source of truth.
        return tuple(_decode_inputs(self._node_def.input, self._graph))

    @Node.control_inputs.getter
    def control_inputs(self) -> Tuple[Node]:
        # Recomputed on every call; the wrapped NodeDef is the source of truth.
        return tuple(_decode_control_inputs(self._node_def.input, self._graph))

    def get_attr(self, key: str):
        if key not in self._node_def.attr:
            raise ValueError("Node {} does not have an attribute "
                             "under key '{}'".format(self, key))
        return _attr_value_to_python_type(self._node_def.attr[key])

    def get_attr_keys(self) -> Tuple[str]:
        return tuple(self._node_def.attr)

    def to_node_def(self):
        # Deep copy so mutations on either side do not leak across.
        return deepcopy(self._node_def)
class MutableNode(Node):
"""
Wrapper for a change to a graph that will add a node. Accumulates the
parameters of the node to be added and can produce an appropriate
tf.NodeDef protobuf on demand.
"""
def __init__(self, g: 'graph.Graph', node_id: int, name: str, op_name: str,
             device: str = ""):
    """
    This constructor should only be called from methods of the Graph
    class.
    Args:
      g: The graph that this node is to be added to. The caller is
        responsible for adding the node to the graph.
      node_id: Unique (within the parent graph) integer identifier for the node
      name: Name of the new node to add
      op_name: Name of the operation that the new node will perform
      device: TensorFlow device specification string indicating where this node
        should be located. Default value of "" means "use the default device"
    """
    Node.__init__(self, g, node_id=node_id, name=name,
                  op_name=op_name, outputs=[], device=device)
    # (key, value) pairs; kept as a list to preserve insertion order.
    self._attributes = []
    # Data inputs (Tensor objects) and control-dependency predecessors (Nodes),
    # populated later via set_inputs() / set_control_inputs().
    self._inputs = []
    self._control_inputs = []
def add_attr(self, key: str, value):
    """Add a single attribute to the underlying NodeDef's attr list.
    Args:
      key: Name of the attribute. Must be unique.
      value: Value to put in place for the attribute. Must be one of the
        following types:
        * tf.DType
        * tf.TensorShape
    """
    # Reject duplicate keys up front so later lookups stay unambiguous.
    existing_keys = self._attr_names()
    if key in existing_keys:
        raise ValueError("Already have an attribute called '{}'".format(key))
    self._attributes.append((key, value))
def get_attr(self, key: str):
    """Look up the value stored for `key`, unwrapping tf.AttrValue protos."""
    # self._attributes is a list of (key, value) pairs
    matches = [value for name, value in self._attributes if name == key]
    if not matches:
        raise ValueError("Node {} does not have an attribute "
                         "under key '{}'".format(self, key))
    if len(matches) > 1:
        raise ValueError("Node {} has more than one attribute "
                         "under key '{}'".format(self, key))
    result = matches[0]
    # Convert raw protobuf values to native Python types; everything else is
    # returned as stored.
    if isinstance(result, tf.AttrValue):
        return _attr_value_to_python_type(result)
    return result
def get_attr_keys(self) -> Tuple[str]:
|
def clear_attrs(self):
"""
Remove any attributes that are attached to this node.
"""
self._attributes.clear()
def _attr_names(self):
return [a[0] for a in self._attributes]
@Node.inputs.getter
def inputs(self) -> Tuple[tensor.Tensor]:
return tuple(self._inputs)
def set_inputs(self, new_inputs: Iterable[tensor.Tensor]):
"""
Set all inputs at once, removing anything that was there previously.
Args:
new_inputs: Iterable of `Tensor` objects in this node's parent graph
"""
for t in new_inputs:
if t.graph != self.graph:
raise ValueError("Tensor {} points to graph {}, but this node is in a "
"different graph {}".format(t, t.graph, self.graph))
self._inputs = list(new_inputs)
self._graph.increment_version_counter() # New edges added to graph
def set_control_inputs(self, new_control_inputs: Iterable[Node]):
"""
Set all control inputs at once, removing anything that was there
previously.
Args:
new_control_inputs: Iterable of `Node` objects in this node's parent graph
"""
self._control_inputs = list(new_control_inputs)
def set_outputs_from_pairs(self, new_outputs: Iterable[Tuple[tf.DType,
tf.shape]]):
"""
Set all outputs at once, removing anything that was there previously.
Note that information about outputs is not stored in the serialized graph.
When instantiating a serialized graph, TensorFlow will use its own shape
inference to infer the number, type, and shape of the operator's outputs.
Args:
new_outputs: Iterable of (dtype, shape) pairs that describe the outputs
"""
self._outputs = []
i = 0
for (dtype, shape) in new_outputs:
self._outputs.append(tensor.Tensor(self, i, dtype, shape))
i += 1
self._graph.increment_version_counter() # Just in case
def infer_outputs(self):
"""
Use TensorFlow's shape and dtype inference to determine the number of
outputs as well as their shapes and dtypes, based on the node's op type
string, its attribute values, and what inputs are connected to it.
Inference will only function properly if the currently-loaded version of
TensorFlow knows about the specified op type and the current
configuration of this op's inputs is compatible with the combination of
op type string and parameters.
Overwrites the previous value of the `outputs` property.
Raises:
TBD
"""
# TF lack a supported API for invoking shape inference directly,
# so we instantiate a dummy graph and create a dummy Operation object
temp_graph = tf.Graph()
with temp_graph.as_default():
input_placeholders = [tf.placeholder(shape=t.shape, dtype=t.dtype) for
t in self._inputs]
# See the docs for tf.Operation for important notes about the semantics
# of each arg to the following constructor.
dummy_op = tf.Operation(self.to_node_def(), temp_graph,
inputs=input_placeholders)
self.set_outputs_from_pairs([(o.dtype, o.shape)
for o in dummy_op.outputs])
# set_outputs_from_pairs() increments the version counter, so we don't
# need to. Also, we haven't added edges to the graph until these
# outputs are connected to another node's inputs.
def set_inputs_from_strings(self, new_inputs: Iterable[str],
set_control_inputs: bool = True):
"""
Set all input at once, converting TensorFlow string-format inputs into
`Tensor` objects. All nodes referenced in the input | return tuple([p[0] for p in self._attributes]) | identifier_body |
node.py | key: str) -> Any:
"""
Retrieve the value of an attribute by name.
Args:
key: Key under which the node's attribute is stored
Returns:
Current value of the attribute as an appropriate native Python type
(NOT a `tf.AttrValue` protobuf) or None if no value was found.
Raises:
ValueError if the indicated key does not have an attribute associated
with it.
"""
raise NotImplementedError("This method should be implemented by "
"subclasses.")
def get_attr_keys(self) -> Tuple[str]:
"""
Returns:
Tuple (immutable list) of the keys of all attributes currently present
in the node
"""
raise NotImplementedError("This method should be implemented by "
"subclasses.")
class ImmutableNode(Node):
"""
Wrapper for tf.NodeDef. Also maintains a pointer back to wrapper object for
the original graph.
"""
def __init__(self, g: 'graph.Graph', node_id: int, node_def: tf.NodeDef,
outputs_list: List[Tuple[tf.DType, tf.shape]]):
"""
Args:
g: pge.Graph object that represents the parent graph
node_id: Unique (within parent graph) integer identifier for this node
node_def: tf.NodeDef protobuf
outputs_list: List of (type, shape) pairs that describe the outputs of
this node
"""
Node.__init__(self, g, node_id=node_id, name=node_def.name,
op_name=node_def.op,
outputs=[tensor.Tensor(self, i, outputs_list[i][0],
outputs_list[i][1])
for i in range(len(outputs_list))],
device=node_def.device)
self._node_def = node_def
@Node.inputs.getter
def inputs(self) -> Tuple[tensor.Tensor]:
# Regenerate each time for now.
return tuple(_decode_inputs(self._node_def.input, self._graph))
@Node.control_inputs.getter
def control_inputs(self) -> Tuple[Node]:
# For now, regenerate every time
return tuple(_decode_control_inputs(self._node_def.input, self._graph))
def get_attr(self, key: str):
if key not in self._node_def.attr:
raise ValueError("Node {} does not have an attribute "
"under key '{}'".format(self, key))
return _attr_value_to_python_type(self._node_def.attr[key])
def get_attr_keys(self) -> Tuple[str]:
return tuple(self._node_def.attr)
def to_node_def(self):
return deepcopy(self._node_def)
class MutableNode(Node):
"""
Wrapper for a change to a graph that will add a node. Accumulates the
parameters of the node to be added and can produce an appropriate
tf.NodeDef protobuf on demand.
"""
def __init__(self, g: 'graph.Graph', node_id: int, name: str, op_name: str,
device: str = ""):
"""
This constructor should only be called from methods of the Graph
class.
Args:
g: The graph that this node is to be added to. The caller is
responsible for adding the node to the graph.
node_id: Unique (within the parent graph) integer identifier for the node
name: Name of the new node to add
op_name: Name of the operation that the new node will perform
device: TensorFlow device specification string indicating where this node
should be located. Default value of "" means "use the default device"
"""
Node.__init__(self, g, node_id=node_id, name=name,
op_name=op_name, outputs=[], device=device)
self._attributes = []
self._inputs = []
self._control_inputs = []
def add_attr(self, key: str, value):
"""Add a single attribute to the underlying NodeDef's attr list.
Args:
key: Name of the attribute. Must be unique.
value: Value to put in place for the attribute. Must be one of the
following types:
* tf.DType
* tf.TensorShape
"""
if key in self._attr_names():
raise ValueError("Already have an attribute called '{}'".format(key))
self._attributes.append((key, value))
def get_attr(self, key: str):
# self._attributes is a list of (key, value) pairs
matches = [p[1] for p in self._attributes if p[0] == key]
if 0 == len(matches):
raise ValueError("Node {} does not have an attribute "
"under key '{}'".format(self, key))
elif len(matches) > 1:
raise ValueError("Node {} has more than one attribute "
"under key '{}'".format(self, key))
ret = matches[0]
if isinstance(ret, tf.AttrValue):
return _attr_value_to_python_type(ret)
else:
return ret
def get_attr_keys(self) -> Tuple[str]:
return tuple([p[0] for p in self._attributes])
def clear_attrs(self):
"""
Remove any attributes that are attached to this node.
"""
self._attributes.clear()
def _attr_names(self):
return [a[0] for a in self._attributes]
@Node.inputs.getter
def inputs(self) -> Tuple[tensor.Tensor]:
return tuple(self._inputs)
def set_inputs(self, new_inputs: Iterable[tensor.Tensor]):
"""
Set all inputs at once, removing anything that was there previously.
Args:
new_inputs: Iterable of `Tensor` objects in this node's parent graph
"""
for t in new_inputs:
if t.graph != self.graph:
raise ValueError("Tensor {} points to graph {}, but this node is in a "
"different graph {}".format(t, t.graph, self.graph))
self._inputs = list(new_inputs)
self._graph.increment_version_counter() # New edges added to graph
def set_control_inputs(self, new_control_inputs: Iterable[Node]):
"""
Set all control inputs at once, removing anything that was there
previously.
Args:
new_control_inputs: Iterable of `Node` objects in this node's parent graph
"""
self._control_inputs = list(new_control_inputs)
def set_outputs_from_pairs(self, new_outputs: Iterable[Tuple[tf.DType,
tf.shape]]):
"""
Set all outputs at once, removing anything that was there previously.
Note that information about outputs is not stored in the serialized graph.
When instantiating a serialized graph, TensorFlow will use its own shape
inference to infer the number, type, and shape of the operator's outputs.
Args:
new_outputs: Iterable of (dtype, shape) pairs that describe the outputs
"""
self._outputs = []
i = 0
for (dtype, shape) in new_outputs:
self._outputs.append(tensor.Tensor(self, i, dtype, shape))
i += 1
self._graph.increment_version_counter() # Just in case
def infer_outputs(self):
"""
Use TensorFlow's shape and dtype inference to determine the number of
outputs as well as their shapes and dtypes, based on the node's op type
string, its attribute values, and what inputs are connected to it.
Inference will only function properly if the currently-loaded version of
TensorFlow knows about the specified op type and the current
configuration of this op's inputs is compatible with the combination of
op type string and parameters.
Overwrites the previous value of the `outputs` property.
Raises:
TBD
"""
# TF lack a supported API for invoking shape inference directly,
# so we instantiate a dummy graph and create a dummy Operation object
temp_graph = tf.Graph()
with temp_graph.as_default():
input_placeholders = [tf.placeholder(shape=t.shape, dtype=t.dtype) for
t in self._inputs]
# See the docs for tf.Operation for important notes about the semantics
# of each arg to the following constructor.
dummy_op = tf.Operation(self.to_node_def(), temp_graph,
inputs=input_placeholders)
self.set_outputs_from_pairs([(o.dtype, o.shape)
for o in dummy_op.outputs])
# set_outputs_from_pairs() increments the version counter, so we don't
# need to. Also, we haven't added edges to the graph until these
# outputs are connected to another node's inputs.
def set_inputs_from_strings(self, new_inputs: Iterable[str],
set_control_inputs: bool = True):
"""
Set all input at once, converting TensorFlow string-format inputs into
`Tensor` objects. All nodes referenced in the input strings must be
present in the parent graph.
Args:
new_inputs: Input description strings in the format that they appear in a
`tf.NodeDef` protocol buffer.
set_control_inputs: If True, replace existing control inputs for this
node with any control inputs specified in the input strings.
Otherwise , this method will ignore any strings that describe control
inputs.
"""
self._inputs = _decode_inputs(new_inputs, self._graph)
if set_control_inputs:
self._control_inputs = _decode_control_inputs(new_inputs, self._graph)
self._graph.increment_version_counter() # New edges added to graph
@Node.control_inputs.getter
def control_inputs(self) -> Tuple[Node]:
return tuple(self._control_inputs)
def | to_node_def | identifier_name | |
node.py | raise ValueError("Tensor {} points to graph {}, but this node is in a "
"different graph {}".format(t, t.graph, self.graph))
self._inputs = list(new_inputs)
self._graph.increment_version_counter() # New edges added to graph
def set_control_inputs(self, new_control_inputs: Iterable[Node]):
"""
Set all control inputs at once, removing anything that was there
previously.
Args:
new_control_inputs: Iterable of `Node` objects in this node's parent graph
"""
self._control_inputs = list(new_control_inputs)
def set_outputs_from_pairs(self, new_outputs: Iterable[Tuple[tf.DType,
tf.shape]]):
"""
Set all outputs at once, removing anything that was there previously.
Note that information about outputs is not stored in the serialized graph.
When instantiating a serialized graph, TensorFlow will use its own shape
inference to infer the number, type, and shape of the operator's outputs.
Args:
new_outputs: Iterable of (dtype, shape) pairs that describe the outputs
"""
self._outputs = []
i = 0
for (dtype, shape) in new_outputs:
self._outputs.append(tensor.Tensor(self, i, dtype, shape))
i += 1
self._graph.increment_version_counter() # Just in case
def infer_outputs(self):
"""
Use TensorFlow's shape and dtype inference to determine the number of
outputs as well as their shapes and dtypes, based on the node's op type
string, its attribute values, and what inputs are connected to it.
Inference will only function properly if the currently-loaded version of
TensorFlow knows about the specified op type and the current
configuration of this op's inputs is compatible with the combination of
op type string and parameters.
Overwrites the previous value of the `outputs` property.
Raises:
TBD
"""
# TF lack a supported API for invoking shape inference directly,
# so we instantiate a dummy graph and create a dummy Operation object
temp_graph = tf.Graph()
with temp_graph.as_default():
input_placeholders = [tf.placeholder(shape=t.shape, dtype=t.dtype) for
t in self._inputs]
# See the docs for tf.Operation for important notes about the semantics
# of each arg to the following constructor.
dummy_op = tf.Operation(self.to_node_def(), temp_graph,
inputs=input_placeholders)
self.set_outputs_from_pairs([(o.dtype, o.shape)
for o in dummy_op.outputs])
# set_outputs_from_pairs() increments the version counter, so we don't
# need to. Also, we haven't added edges to the graph until these
# outputs are connected to another node's inputs.
def set_inputs_from_strings(self, new_inputs: Iterable[str],
set_control_inputs: bool = True):
"""
Set all input at once, converting TensorFlow string-format inputs into
`Tensor` objects. All nodes referenced in the input strings must be
present in the parent graph.
Args:
new_inputs: Input description strings in the format that they appear in a
`tf.NodeDef` protocol buffer.
set_control_inputs: If True, replace existing control inputs for this
node with any control inputs specified in the input strings.
Otherwise , this method will ignore any strings that describe control
inputs.
"""
self._inputs = _decode_inputs(new_inputs, self._graph)
if set_control_inputs:
self._control_inputs = _decode_control_inputs(new_inputs, self._graph)
self._graph.increment_version_counter() # New edges added to graph
@Node.control_inputs.getter
def control_inputs(self) -> Tuple[Node]:
return tuple(self._control_inputs)
def to_node_def(self):
ret = tf.NodeDef()
ret.name = self.name
ret.op = self.op_name
for input_tensor in self.inputs:
ret.input.append(input_tensor.name)
for control_input_node in self.control_inputs:
ret.input.append("^" + control_input_node.name)
ret.device = self.device
for (attr_name, attr_value) in self._attributes:
# Funky syntax for setting a field of a union in a protobuf
ret.attr[attr_name].CopyFrom(_python_type_to_attr_value(attr_value))
return ret
def set_device(self, device: str):
self._device = device
################################################################################
# Stuff below this line is private to this file.
def _canonicalize_output_name(name: str):
"""
Args:
name: Name for an op output as it would appear in the protocol buffer
representation of a an operator graph
Returns:
A name in the form "<op name>:<output index>"
"""
if ":" in name:
return name
else:
return name + ":0"
def _decode_inputs(inputs: Iterable[str], g: 'graph.Graph') -> List[
tensor.Tensor]:
"""
Extract and decode the inputs in a list of TensorFlow input specification
strings.
Skips over control inputs.
Args:
inputs: List of strings specifying data and/or control inputs,
as serialized in `tf.NodeDef` protocol buffers.
g: Reference to a `Graph` object that must have nodes corresponding
to all inputs in the inputs list.
Returns:
A list of `Tensor` objects corresponding to each of the specified inputs.
"""
# Input names in the protobuf take three forms:
# "^node_name" --> Control input from indicated node
# "node_name" --> Input from output number 0 of indicated node
# "node_name:ix" --> Input from output number <ix> of indicated node
# Start by filtering out the control inputs and turning "node_name" into
# "node_name:0".
input_names = [_canonicalize_output_name(n) for n in inputs
if not n.startswith("^")]
input_tensors = []
for name in input_names:
# Name is in form "node:output number"
node_name, output_ix_name = name.split(":")
output_ix = int(output_ix_name)
input_tensors.append(g[node_name].output(output_ix))
return input_tensors
def _decode_control_inputs(inputs: Iterable[str], g: 'graph.Graph') -> List[
Node]:
"""
Extract and decode the control inputs in a list of TensorFlow input
specification strings.
Skips data inputs.
Args:
inputs: List of strings specifying data and/or control inputs,
as serialized in `tf.NodeDef` protocol buffers.
g: Reference to a `Graph` object that must have nodes corresponding
to all inputs in the inputs list.
Returns:
A list of `Node` objects corresponding to each of the control inputs.
"""
# Control inputs start with "^". Skip everything else and strip off the
# leading caret character
control_input_names = [n[1:] for n in inputs if n.startswith("^")]
return [g[name] for name in control_input_names]
def _python_type_to_attr_value(value: Any) -> tf.AttrValue:
"""
Convert a Python object or scalar value to a TensorFlow `tf.AttrValue`
protocol buffer message.
Args:
value: Python object to be converted
Returns:
An AttrValue object that wraps the contents of `value` in the most
appropriate way available.
"""
# TODO(frreiss): Handle AttrValues that are lists
if isinstance(value, tf.AttrValue):
# TODO(frreiss): Should this case result in an error?
return value
# Scalar types, in the order they appear in the .proto file
elif isinstance(value, str):
return tf.AttrValue(s=tf.compat.as_bytes(value))
elif isinstance(value, int):
return tf.AttrValue(i=value)
elif isinstance(value, float):
return tf.AttrValue(f=value)
elif isinstance(value, bool):
return tf.AttrValue(b=value)
elif isinstance(value, tf.DType):
return tf.AttrValue(type=value.as_datatype_enum())
elif isinstance(value, tf.TensorShape):
return tf.AttrValue(shape=value.as_proto())
elif isinstance(value, np.ndarray):
return tf.AttrValue(tensor=tf.make_tensor_proto(values=value))
# TODO(frreiss): Populate the "func" and "placeholder" fields of the union
# here
else:
raise ValueError("Don't know how to convert a {} to "
"tf.AttrValue".format(type(value)))
def _attr_value_to_python_type(attr_value: tf.AttrValue) -> Any:
"""
Inverse of _python_type_to_attr_value().
Args:
attr_value: Protocol buffer version of a node's attribute value
Returns:
A Python object or built-in type corresponding to the field in
`attr_value` that is in use.
"""
# TODO(frreiss): Handle AttrValues that are lists
if attr_value.HasField("s"): # str
# TODO(frreiss): Should we return the binary value here?
return tf.compat.as_str(attr_value.s)
elif attr_value.HasField("i"): # int
return attr_value.i
elif attr_value.HasField("f"): # float
return attr_value.f
elif attr_value.HasField("b"): # bool
return attr_value.b
elif attr_value.HasField("type"): # DType
| return tf.DType(attr_value.type) | conditional_block | |
node.py | ValueError("Node {} has more than one attribute "
"under key '{}'".format(self, key))
ret = matches[0]
if isinstance(ret, tf.AttrValue):
return _attr_value_to_python_type(ret)
else:
return ret
def get_attr_keys(self) -> Tuple[str]:
return tuple([p[0] for p in self._attributes])
def clear_attrs(self):
"""
Remove any attributes that are attached to this node.
"""
self._attributes.clear()
def _attr_names(self):
return [a[0] for a in self._attributes]
@Node.inputs.getter
def inputs(self) -> Tuple[tensor.Tensor]:
return tuple(self._inputs)
def set_inputs(self, new_inputs: Iterable[tensor.Tensor]):
"""
Set all inputs at once, removing anything that was there previously.
Args:
new_inputs: Iterable of `Tensor` objects in this node's parent graph
"""
for t in new_inputs:
if t.graph != self.graph:
raise ValueError("Tensor {} points to graph {}, but this node is in a "
"different graph {}".format(t, t.graph, self.graph))
self._inputs = list(new_inputs)
self._graph.increment_version_counter() # New edges added to graph
def set_control_inputs(self, new_control_inputs: Iterable[Node]):
"""
Set all control inputs at once, removing anything that was there
previously.
Args:
new_control_inputs: Iterable of `Node` objects in this node's parent graph
"""
self._control_inputs = list(new_control_inputs)
def set_outputs_from_pairs(self, new_outputs: Iterable[Tuple[tf.DType,
tf.shape]]):
"""
Set all outputs at once, removing anything that was there previously.
Note that information about outputs is not stored in the serialized graph.
When instantiating a serialized graph, TensorFlow will use its own shape
inference to infer the number, type, and shape of the operator's outputs.
Args:
new_outputs: Iterable of (dtype, shape) pairs that describe the outputs
"""
self._outputs = []
i = 0
for (dtype, shape) in new_outputs:
self._outputs.append(tensor.Tensor(self, i, dtype, shape))
i += 1
self._graph.increment_version_counter() # Just in case
def infer_outputs(self):
"""
Use TensorFlow's shape and dtype inference to determine the number of
outputs as well as their shapes and dtypes, based on the node's op type
string, its attribute values, and what inputs are connected to it.
Inference will only function properly if the currently-loaded version of
TensorFlow knows about the specified op type and the current
configuration of this op's inputs is compatible with the combination of
op type string and parameters.
Overwrites the previous value of the `outputs` property.
Raises:
TBD
"""
# TF lack a supported API for invoking shape inference directly,
# so we instantiate a dummy graph and create a dummy Operation object
temp_graph = tf.Graph()
with temp_graph.as_default():
input_placeholders = [tf.placeholder(shape=t.shape, dtype=t.dtype) for
t in self._inputs]
# See the docs for tf.Operation for important notes about the semantics
# of each arg to the following constructor.
dummy_op = tf.Operation(self.to_node_def(), temp_graph,
inputs=input_placeholders)
self.set_outputs_from_pairs([(o.dtype, o.shape)
for o in dummy_op.outputs])
# set_outputs_from_pairs() increments the version counter, so we don't
# need to. Also, we haven't added edges to the graph until these
# outputs are connected to another node's inputs.
def set_inputs_from_strings(self, new_inputs: Iterable[str],
set_control_inputs: bool = True):
"""
Set all input at once, converting TensorFlow string-format inputs into
`Tensor` objects. All nodes referenced in the input strings must be
present in the parent graph.
Args:
new_inputs: Input description strings in the format that they appear in a
`tf.NodeDef` protocol buffer.
set_control_inputs: If True, replace existing control inputs for this
node with any control inputs specified in the input strings.
Otherwise , this method will ignore any strings that describe control
inputs.
"""
self._inputs = _decode_inputs(new_inputs, self._graph)
if set_control_inputs:
self._control_inputs = _decode_control_inputs(new_inputs, self._graph)
self._graph.increment_version_counter() # New edges added to graph
@Node.control_inputs.getter
def control_inputs(self) -> Tuple[Node]:
return tuple(self._control_inputs)
def to_node_def(self):
ret = tf.NodeDef()
ret.name = self.name
ret.op = self.op_name
for input_tensor in self.inputs:
ret.input.append(input_tensor.name)
for control_input_node in self.control_inputs:
ret.input.append("^" + control_input_node.name)
ret.device = self.device
for (attr_name, attr_value) in self._attributes:
# Funky syntax for setting a field of a union in a protobuf
ret.attr[attr_name].CopyFrom(_python_type_to_attr_value(attr_value))
return ret
def set_device(self, device: str):
self._device = device
################################################################################
# Stuff below this line is private to this file.
def _canonicalize_output_name(name: str):
"""
Args:
name: Name for an op output as it would appear in the protocol buffer
representation of a an operator graph
Returns:
A name in the form "<op name>:<output index>"
"""
if ":" in name:
return name
else:
return name + ":0"
def _decode_inputs(inputs: Iterable[str], g: 'graph.Graph') -> List[
tensor.Tensor]:
"""
Extract and decode the inputs in a list of TensorFlow input specification
strings.
Skips over control inputs.
Args:
inputs: List of strings specifying data and/or control inputs,
as serialized in `tf.NodeDef` protocol buffers.
g: Reference to a `Graph` object that must have nodes corresponding
to all inputs in the inputs list.
Returns:
A list of `Tensor` objects corresponding to each of the specified inputs.
"""
# Input names in the protobuf take three forms:
# "^node_name" --> Control input from indicated node
# "node_name" --> Input from output number 0 of indicated node
# "node_name:ix" --> Input from output number <ix> of indicated node
# Start by filtering out the control inputs and turning "node_name" into
# "node_name:0".
input_names = [_canonicalize_output_name(n) for n in inputs
if not n.startswith("^")]
input_tensors = []
for name in input_names:
# Name is in form "node:output number"
node_name, output_ix_name = name.split(":")
output_ix = int(output_ix_name)
input_tensors.append(g[node_name].output(output_ix))
return input_tensors
def _decode_control_inputs(inputs: Iterable[str], g: 'graph.Graph') -> List[
Node]:
"""
Extract and decode the control inputs in a list of TensorFlow input
specification strings.
Skips data inputs.
Args:
inputs: List of strings specifying data and/or control inputs,
as serialized in `tf.NodeDef` protocol buffers.
g: Reference to a `Graph` object that must have nodes corresponding
to all inputs in the inputs list.
Returns:
A list of `Node` objects corresponding to each of the control inputs.
"""
# Control inputs start with "^". Skip everything else and strip off the
# leading caret character
control_input_names = [n[1:] for n in inputs if n.startswith("^")]
return [g[name] for name in control_input_names]
def _python_type_to_attr_value(value: Any) -> tf.AttrValue:
"""
Convert a Python object or scalar value to a TensorFlow `tf.AttrValue`
protocol buffer message.
Args:
value: Python object to be converted
Returns:
An AttrValue object that wraps the contents of `value` in the most
appropriate way available.
"""
# TODO(frreiss): Handle AttrValues that are lists
if isinstance(value, tf.AttrValue):
# TODO(frreiss): Should this case result in an error?
return value
# Scalar types, in the order they appear in the .proto file
elif isinstance(value, str):
return tf.AttrValue(s=tf.compat.as_bytes(value))
elif isinstance(value, int):
return tf.AttrValue(i=value)
elif isinstance(value, float):
return tf.AttrValue(f=value)
elif isinstance(value, bool):
return tf.AttrValue(b=value)
elif isinstance(value, tf.DType):
return tf.AttrValue(type=value.as_datatype_enum())
elif isinstance(value, tf.TensorShape):
return tf.AttrValue(shape=value.as_proto())
elif isinstance(value, np.ndarray):
return tf.AttrValue(tensor=tf.make_tensor_proto(values=value))
# TODO(frreiss): Populate the "func" and "placeholder" fields of the union
# here | else:
raise ValueError("Don't know how to convert a {} to " | random_line_split | |
goto_definition.rs | Ref(n) => Some(AstPtr::new(n.syntax())),
ast::Name(n) => Some(AstPtr::new(n.syntax())),
ast::Literal(n) => Some(AstPtr::new(n.syntax())),
_ => None,
}
}
})?;
let source_map = db.source_map(file_id);
let expr_id = source_map.expr_for_node(ptr)?;
// Special case for goto-path.
if tok.kind() == SyntaxKind::PATH {
let module = db.module(file_id);
let Expr::Literal(Literal::Path(path)) = &module[expr_id] else {
return None;
};
let path = path.resolve(db)?;
return Some(GotoDefinitionResult::Path(path));
}
let name_res = db.name_resolution(file_id);
let targets = match name_res.get(expr_id)? {
&ResolveResult::Definition(name) => source_map
.nodes_for_name(name)
.filter_map(|ptr| {
let name_node = ptr.to_node(&parse.syntax_node());
let full_node = name_node.ancestors().find(|n| {
matches!(
n.kind(),
SyntaxKind::LAMBDA | SyntaxKind::ATTR_PATH_VALUE | SyntaxKind::INHERIT
)
})?;
Some(NavigationTarget {
file_id,
focus_range: name_node.text_range(),
full_range: full_node.text_range(),
})
})
.collect(),
ResolveResult::WithExprs(withs) => {
withs
.iter()
.filter_map(|&with_expr| {
// with expr; body
// ^--^ focus
// ^--------^ full
let with_node = source_map
.node_for_expr(with_expr)
.expect("WithExprs must be valid")
.to_node(&parse.syntax_node());
let with_node = ast::With::cast(with_node).expect("WithExprs must be valid");
let with_token_range = with_node.with_token()?.text_range();
let with_header_end = with_node
.semicolon_token()
.map_or_else(|| with_node.syntax().text_range(), |tok| tok.text_range());
let with_header = with_token_range.cover(with_header_end);
Some(NavigationTarget {
file_id,
focus_range: with_token_range,
full_range: with_header,
})
})
.collect()
}
// Currently builtin names cannot "goto-definition".
ResolveResult::Builtin(_) => return None,
};
Some(GotoDefinitionResult::Targets(targets))
}
fn goto_flake_input(
db: &dyn DefDatabase,
file: FileId,
tok: SyntaxToken,
) -> Option<GotoDefinitionResult> {
let module_kind = db.module_kind(file);
let ModuleKind::FlakeNix {
explicit_inputs,
param_inputs,
..
} = &*module_kind
else {
return None;
};
let flake_info = db.source_root_flake_info(db.file_source_root(file))?;
let ptr = tok.parent_ancestors().find_map(|node| {
match_ast! {
match node {
ast::Attr(n) => Some(AstPtr::new(n.syntax())),
_ => None,
}
}
})?;
let module = db.module(file);
let source_map = db.source_map(file);
let name_id = source_map.name_for_node(ptr)?;
let name_str = &*module[name_id].text;
if explicit_inputs.get(name_str) == Some(&name_id)
|| param_inputs.get(name_str) == Some(&name_id)
{
let target = flake_info
.input_store_paths
.get(name_str)?
.join(FLAKE_FILE)?;
return Some(GotoDefinitionResult::Path(target));
}
None
}
#[cfg(test)]
mod tests {
use super::*;
use crate::base::SourceDatabase;
use crate::tests::TestDB;
use expect_test::{expect, Expect};
#[track_caller]
fn check_no(fixture: &str) |
#[track_caller]
fn check(fixture: &str, expect: Expect) {
let (db, f) = TestDB::from_fixture(fixture).unwrap();
assert_eq!(f.markers().len(), 1, "Missing markers");
let mut got = match goto_definition(&db, f[0]).expect("No definition") {
GotoDefinitionResult::Path(path) => format!("file://{}", path.display()),
GotoDefinitionResult::Targets(targets) => {
assert!(!targets.is_empty());
targets
.into_iter()
.map(|target| {
assert!(target.full_range.contains_range(target.focus_range));
let src = db.file_content(target.file_id);
let mut full = src[target.full_range].to_owned();
let relative_focus = target.focus_range - target.full_range.start();
full.insert(relative_focus.end().into(), '>');
full.insert(relative_focus.start().into(), '<');
full
})
.collect::<Vec<_>>()
.join("\n")
}
};
// Prettify.
if got.contains('\n') {
got += "\n";
}
expect.assert_eq(&got);
}
#[test]
fn not_found() {
check_no("$0a");
check_no("b: $0a");
}
#[test]
fn invalid_position() {
check_no("1 $0+ 2");
check_no("wi$0th 1; 2");
}
#[test]
fn lambda_param() {
check("a: (a: (a $0a)) 1", expect!["<a>: (a a)"]);
check("x: (a: (a $0x)) 1", expect!["<x>: (a: (a x)) 1"]);
check("a: (a@{ x }: (a $0a)) 1", expect!["<a>@{ x }: (a a)"]);
check("a: ({ x ? $0a }@a: a) 1", expect!["{ x ? a }@<a>: a"]);
check("a: ({ x ? $0x }@a: a) 1", expect!["{ <x> ? x }@a: a"]);
}
#[test]
fn with_env() {
check("with 1; let a = 1; in with 2; $0a", expect!["<a> = 1;"]);
check(
"with 1; let a = 1; in with 2; $0b",
expect![[r#"
<with> 2;
<with> 1;
"#]],
);
}
#[test]
fn bindings() {
check(
"let a = a; in rec { inherit a; b = $0a; }",
expect!["inherit <a>;"],
);
check(
"let a = a; in rec { inherit $0a; b = a; }",
expect!["<a> = a;"],
);
check(
"let a = $0a; in rec { inherit a; b = a; }",
expect!["<a> = a;"],
);
}
#[test]
fn left_and_right() {
check("let a = 1; in $0a ", expect!["<a> = 1;"]);
check("let a = 1; in a$0 ", expect!["<a> = 1;"]);
check("let a = 1; in 0+$0a+0", expect!["<a> = 1;"]);
check("let a = 1; in 0+a$0+0", expect!["<a> = 1;"]);
}
#[test]
fn merged_binding() {
check(
"let a.a = 1; a.b = 2; a = { c = 3; }; in $0a",
expect![[r#"
<a>.a = 1;
<a>.b = 2;
<a> = { c = 3; };
"#]],
);
check(
"rec { b = $0a; a = { a = 1; }; a = { a = 2; }; }",
expect![[r#"
<a> = { a = 1; };
<a> = { a = 2; };
"#]],
);
}
#[test]
fn builtin() {
check("let true = 1; in $0true && false", expect!["<true> = 1;"]);
check_no("let true = 1; in true && $0false");
}
#[test]
fn path() {
check("1 + $0./.", expect!["file:///"]);
check(
"
#- /default.nix
import $0./bar.nix
#- /bar.nix
hello
",
expect!["file:///bar.nix"],
);
}
#[test]
fn flake_input() {
check(
| {
let (db, f) = TestDB::from_fixture(fixture).unwrap();
assert_eq!(f.markers().len(), 1, "Missing markers");
assert_eq!(goto_definition(&db, f[0]), None);
} | identifier_body |
goto_definition.rs | Ref(n) => Some(AstPtr::new(n.syntax())),
ast::Name(n) => Some(AstPtr::new(n.syntax())),
ast::Literal(n) => Some(AstPtr::new(n.syntax())),
_ => None,
}
}
})?;
let source_map = db.source_map(file_id);
let expr_id = source_map.expr_for_node(ptr)?;
// Special case for goto-path.
if tok.kind() == SyntaxKind::PATH {
let module = db.module(file_id);
let Expr::Literal(Literal::Path(path)) = &module[expr_id] else {
return None;
};
let path = path.resolve(db)?;
return Some(GotoDefinitionResult::Path(path));
}
let name_res = db.name_resolution(file_id);
let targets = match name_res.get(expr_id)? {
&ResolveResult::Definition(name) => source_map
.nodes_for_name(name)
.filter_map(|ptr| {
let name_node = ptr.to_node(&parse.syntax_node());
let full_node = name_node.ancestors().find(|n| {
matches!(
n.kind(),
SyntaxKind::LAMBDA | SyntaxKind::ATTR_PATH_VALUE | SyntaxKind::INHERIT
)
})?;
Some(NavigationTarget {
file_id,
focus_range: name_node.text_range(),
full_range: full_node.text_range(),
})
})
.collect(),
ResolveResult::WithExprs(withs) => | full_range: with_header,
})
})
.collect()
}
// Currently builtin names cannot "goto-definition".
ResolveResult::Builtin(_) => return None,
};
Some(GotoDefinitionResult::Targets(targets))
}
fn goto_flake_input(
db: &dyn DefDatabase,
file: FileId,
tok: SyntaxToken,
) -> Option<GotoDefinitionResult> {
let module_kind = db.module_kind(file);
let ModuleKind::FlakeNix {
explicit_inputs,
param_inputs,
..
} = &*module_kind
else {
return None;
};
let flake_info = db.source_root_flake_info(db.file_source_root(file))?;
let ptr = tok.parent_ancestors().find_map(|node| {
match_ast! {
match node {
ast::Attr(n) => Some(AstPtr::new(n.syntax())),
_ => None,
}
}
})?;
let module = db.module(file);
let source_map = db.source_map(file);
let name_id = source_map.name_for_node(ptr)?;
let name_str = &*module[name_id].text;
if explicit_inputs.get(name_str) == Some(&name_id)
|| param_inputs.get(name_str) == Some(&name_id)
{
let target = flake_info
.input_store_paths
.get(name_str)?
.join(FLAKE_FILE)?;
return Some(GotoDefinitionResult::Path(target));
}
None
}
#[cfg(test)]
mod tests {
use super::*;
use crate::base::SourceDatabase;
use crate::tests::TestDB;
use expect_test::{expect, Expect};
#[track_caller]
fn check_no(fixture: &str) {
let (db, f) = TestDB::from_fixture(fixture).unwrap();
assert_eq!(f.markers().len(), 1, "Missing markers");
assert_eq!(goto_definition(&db, f[0]), None);
}
#[track_caller]
fn check(fixture: &str, expect: Expect) {
let (db, f) = TestDB::from_fixture(fixture).unwrap();
assert_eq!(f.markers().len(), 1, "Missing markers");
let mut got = match goto_definition(&db, f[0]).expect("No definition") {
GotoDefinitionResult::Path(path) => format!("file://{}", path.display()),
GotoDefinitionResult::Targets(targets) => {
assert!(!targets.is_empty());
targets
.into_iter()
.map(|target| {
assert!(target.full_range.contains_range(target.focus_range));
let src = db.file_content(target.file_id);
let mut full = src[target.full_range].to_owned();
let relative_focus = target.focus_range - target.full_range.start();
full.insert(relative_focus.end().into(), '>');
full.insert(relative_focus.start().into(), '<');
full
})
.collect::<Vec<_>>()
.join("\n")
}
};
// Prettify.
if got.contains('\n') {
got += "\n";
}
expect.assert_eq(&got);
}
#[test]
fn not_found() {
check_no("$0a");
check_no("b: $0a");
}
#[test]
fn invalid_position() {
check_no("1 $0+ 2");
check_no("wi$0th 1; 2");
}
#[test]
fn lambda_param() {
check("a: (a: (a $0a)) 1", expect!["<a>: (a a)"]);
check("x: (a: (a $0x)) 1", expect!["<x>: (a: (a x)) 1"]);
check("a: (a@{ x }: (a $0a)) 1", expect!["<a>@{ x }: (a a)"]);
check("a: ({ x ? $0a }@a: a) 1", expect!["{ x ? a }@<a>: a"]);
check("a: ({ x ? $0x }@a: a) 1", expect!["{ <x> ? x }@a: a"]);
}
#[test]
fn with_env() {
check("with 1; let a = 1; in with 2; $0a", expect!["<a> = 1;"]);
check(
"with 1; let a = 1; in with 2; $0b",
expect![[r#"
<with> 2;
<with> 1;
"#]],
);
}
#[test]
fn bindings() {
check(
"let a = a; in rec { inherit a; b = $0a; }",
expect!["inherit <a>;"],
);
check(
"let a = a; in rec { inherit $0a; b = a; }",
expect!["<a> = a;"],
);
check(
"let a = $0a; in rec { inherit a; b = a; }",
expect!["<a> = a;"],
);
}
#[test]
fn left_and_right() {
check("let a = 1; in $0a ", expect!["<a> = 1;"]);
check("let a = 1; in a$0 ", expect!["<a> = 1;"]);
check("let a = 1; in 0+$0a+0", expect!["<a> = 1;"]);
check("let a = 1; in 0+a$0+0", expect!["<a> = 1;"]);
}
#[test]
fn merged_binding() {
check(
"let a.a = 1; a.b = 2; a = { c = 3; }; in $0a",
expect![[r#"
<a>.a = 1;
<a>.b = 2;
<a> = { c = 3; };
"#]],
);
check(
"rec { b = $0a; a = { a = 1; }; a = { a = 2; }; }",
expect![[r#"
<a> = { a = 1; };
<a> = { a = 2; };
"#]],
);
}
#[test]
fn builtin() {
check("let true = 1; in $0true && false", expect!["<true> = 1;"]);
check_no("let true = 1; in true && $0false");
}
#[test]
fn path() {
check("1 + $0./.", expect!["file:///"]);
check(
"
#- /default.nix
import $0./bar.nix
#- /bar.nix
hello
",
expect!["file:///bar.nix"],
);
}
#[test]
fn flake_input() {
check(
| {
withs
.iter()
.filter_map(|&with_expr| {
// with expr; body
// ^--^ focus
// ^--------^ full
let with_node = source_map
.node_for_expr(with_expr)
.expect("WithExprs must be valid")
.to_node(&parse.syntax_node());
let with_node = ast::With::cast(with_node).expect("WithExprs must be valid");
let with_token_range = with_node.with_token()?.text_range();
let with_header_end = with_node
.semicolon_token()
.map_or_else(|| with_node.syntax().text_range(), |tok| tok.text_range());
let with_header = with_token_range.cover(with_header_end);
Some(NavigationTarget {
file_id,
focus_range: with_token_range, | conditional_block |
goto_definition.rs | Ref(n) => Some(AstPtr::new(n.syntax())),
ast::Name(n) => Some(AstPtr::new(n.syntax())),
ast::Literal(n) => Some(AstPtr::new(n.syntax())),
_ => None,
}
}
})?;
let source_map = db.source_map(file_id);
let expr_id = source_map.expr_for_node(ptr)?;
// Special case for goto-path.
if tok.kind() == SyntaxKind::PATH {
let module = db.module(file_id);
let Expr::Literal(Literal::Path(path)) = &module[expr_id] else {
return None;
};
let path = path.resolve(db)?;
return Some(GotoDefinitionResult::Path(path));
}
let name_res = db.name_resolution(file_id);
let targets = match name_res.get(expr_id)? {
&ResolveResult::Definition(name) => source_map
.nodes_for_name(name)
.filter_map(|ptr| {
let name_node = ptr.to_node(&parse.syntax_node());
let full_node = name_node.ancestors().find(|n| {
matches!(
n.kind(),
SyntaxKind::LAMBDA | SyntaxKind::ATTR_PATH_VALUE | SyntaxKind::INHERIT
)
})?;
Some(NavigationTarget {
file_id,
focus_range: name_node.text_range(),
full_range: full_node.text_range(),
})
})
.collect(),
ResolveResult::WithExprs(withs) => {
withs
.iter()
.filter_map(|&with_expr| {
// with expr; body
// ^--^ focus
// ^--------^ full
let with_node = source_map
.node_for_expr(with_expr)
.expect("WithExprs must be valid")
.to_node(&parse.syntax_node());
let with_node = ast::With::cast(with_node).expect("WithExprs must be valid");
let with_token_range = with_node.with_token()?.text_range();
let with_header_end = with_node
.semicolon_token()
.map_or_else(|| with_node.syntax().text_range(), |tok| tok.text_range());
let with_header = with_token_range.cover(with_header_end);
Some(NavigationTarget {
file_id,
focus_range: with_token_range,
full_range: with_header,
})
})
.collect()
}
// Currently builtin names cannot "goto-definition".
ResolveResult::Builtin(_) => return None,
};
Some(GotoDefinitionResult::Targets(targets))
}
fn goto_flake_input(
db: &dyn DefDatabase,
file: FileId,
tok: SyntaxToken,
) -> Option<GotoDefinitionResult> {
let module_kind = db.module_kind(file);
let ModuleKind::FlakeNix {
explicit_inputs,
param_inputs,
..
} = &*module_kind
else {
return None;
};
let flake_info = db.source_root_flake_info(db.file_source_root(file))?;
let ptr = tok.parent_ancestors().find_map(|node| {
match_ast! {
match node {
ast::Attr(n) => Some(AstPtr::new(n.syntax())),
_ => None,
}
}
})?;
let module = db.module(file);
let source_map = db.source_map(file);
let name_id = source_map.name_for_node(ptr)?;
let name_str = &*module[name_id].text;
if explicit_inputs.get(name_str) == Some(&name_id)
|| param_inputs.get(name_str) == Some(&name_id)
{
let target = flake_info
.input_store_paths
.get(name_str)?
.join(FLAKE_FILE)?;
return Some(GotoDefinitionResult::Path(target));
}
None
}
#[cfg(test)]
mod tests {
use super::*;
use crate::base::SourceDatabase;
use crate::tests::TestDB;
use expect_test::{expect, Expect};
#[track_caller]
fn check_no(fixture: &str) {
let (db, f) = TestDB::from_fixture(fixture).unwrap();
assert_eq!(f.markers().len(), 1, "Missing markers");
assert_eq!(goto_definition(&db, f[0]), None);
}
#[track_caller]
fn | (fixture: &str, expect: Expect) {
let (db, f) = TestDB::from_fixture(fixture).unwrap();
assert_eq!(f.markers().len(), 1, "Missing markers");
let mut got = match goto_definition(&db, f[0]).expect("No definition") {
GotoDefinitionResult::Path(path) => format!("file://{}", path.display()),
GotoDefinitionResult::Targets(targets) => {
assert!(!targets.is_empty());
targets
.into_iter()
.map(|target| {
assert!(target.full_range.contains_range(target.focus_range));
let src = db.file_content(target.file_id);
let mut full = src[target.full_range].to_owned();
let relative_focus = target.focus_range - target.full_range.start();
full.insert(relative_focus.end().into(), '>');
full.insert(relative_focus.start().into(), '<');
full
})
.collect::<Vec<_>>()
.join("\n")
}
};
// Prettify.
if got.contains('\n') {
got += "\n";
}
expect.assert_eq(&got);
}
#[test]
fn not_found() {
check_no("$0a");
check_no("b: $0a");
}
#[test]
fn invalid_position() {
check_no("1 $0+ 2");
check_no("wi$0th 1; 2");
}
#[test]
fn lambda_param() {
check("a: (a: (a $0a)) 1", expect!["<a>: (a a)"]);
check("x: (a: (a $0x)) 1", expect!["<x>: (a: (a x)) 1"]);
check("a: (a@{ x }: (a $0a)) 1", expect!["<a>@{ x }: (a a)"]);
check("a: ({ x ? $0a }@a: a) 1", expect!["{ x ? a }@<a>: a"]);
check("a: ({ x ? $0x }@a: a) 1", expect!["{ <x> ? x }@a: a"]);
}
#[test]
fn with_env() {
check("with 1; let a = 1; in with 2; $0a", expect!["<a> = 1;"]);
check(
"with 1; let a = 1; in with 2; $0b",
expect![[r#"
<with> 2;
<with> 1;
"#]],
);
}
#[test]
fn bindings() {
check(
"let a = a; in rec { inherit a; b = $0a; }",
expect!["inherit <a>;"],
);
check(
"let a = a; in rec { inherit $0a; b = a; }",
expect!["<a> = a;"],
);
check(
"let a = $0a; in rec { inherit a; b = a; }",
expect!["<a> = a;"],
);
}
#[test]
fn left_and_right() {
check("let a = 1; in $0a ", expect!["<a> = 1;"]);
check("let a = 1; in a$0 ", expect!["<a> = 1;"]);
check("let a = 1; in 0+$0a+0", expect!["<a> = 1;"]);
check("let a = 1; in 0+a$0+0", expect!["<a> = 1;"]);
}
#[test]
fn merged_binding() {
check(
"let a.a = 1; a.b = 2; a = { c = 3; }; in $0a",
expect![[r#"
<a>.a = 1;
<a>.b = 2;
<a> = { c = 3; };
"#]],
);
check(
"rec { b = $0a; a = { a = 1; }; a = { a = 2; }; }",
expect![[r#"
<a> = { a = 1; };
<a> = { a = 2; };
"#]],
);
}
#[test]
fn builtin() {
check("let true = 1; in $0true && false", expect!["<true> = 1;"]);
check_no("let true = 1; in true && $0false");
}
#[test]
fn path() {
check("1 + $0./.", expect!["file:///"]);
check(
"
#- /default.nix
import $0./bar.nix
#- /bar.nix
hello
",
expect!["file:///bar.nix"],
);
}
#[test]
fn flake_input() {
check(
| check | identifier_name |
goto_definition.rs | ::Ref(n) => Some(AstPtr::new(n.syntax())),
ast::Name(n) => Some(AstPtr::new(n.syntax())),
ast::Literal(n) => Some(AstPtr::new(n.syntax())),
_ => None,
}
}
})?;
let source_map = db.source_map(file_id);
let expr_id = source_map.expr_for_node(ptr)?;
// Special case for goto-path.
if tok.kind() == SyntaxKind::PATH {
let module = db.module(file_id);
let Expr::Literal(Literal::Path(path)) = &module[expr_id] else {
return None;
};
let path = path.resolve(db)?;
return Some(GotoDefinitionResult::Path(path));
}
let name_res = db.name_resolution(file_id);
let targets = match name_res.get(expr_id)? {
&ResolveResult::Definition(name) => source_map
.nodes_for_name(name)
.filter_map(|ptr| {
let name_node = ptr.to_node(&parse.syntax_node());
let full_node = name_node.ancestors().find(|n| {
matches!(
n.kind(),
SyntaxKind::LAMBDA | SyntaxKind::ATTR_PATH_VALUE | SyntaxKind::INHERIT
)
})?;
Some(NavigationTarget {
file_id,
focus_range: name_node.text_range(),
full_range: full_node.text_range(),
})
})
.collect(),
ResolveResult::WithExprs(withs) => {
withs
.iter()
.filter_map(|&with_expr| {
// with expr; body
// ^--^ focus
// ^--------^ full
let with_node = source_map
.node_for_expr(with_expr)
.expect("WithExprs must be valid")
.to_node(&parse.syntax_node());
let with_node = ast::With::cast(with_node).expect("WithExprs must be valid");
let with_token_range = with_node.with_token()?.text_range();
let with_header_end = with_node
.semicolon_token()
.map_or_else(|| with_node.syntax().text_range(), |tok| tok.text_range());
let with_header = with_token_range.cover(with_header_end);
Some(NavigationTarget {
file_id,
focus_range: with_token_range,
full_range: with_header,
})
})
.collect()
}
// Currently builtin names cannot "goto-definition".
ResolveResult::Builtin(_) => return None,
};
Some(GotoDefinitionResult::Targets(targets))
}
fn goto_flake_input(
db: &dyn DefDatabase,
file: FileId,
tok: SyntaxToken,
) -> Option<GotoDefinitionResult> {
let module_kind = db.module_kind(file);
let ModuleKind::FlakeNix {
explicit_inputs,
param_inputs,
..
} = &*module_kind
else {
return None;
};
let flake_info = db.source_root_flake_info(db.file_source_root(file))?;
let ptr = tok.parent_ancestors().find_map(|node| {
match_ast! {
match node {
ast::Attr(n) => Some(AstPtr::new(n.syntax())),
_ => None,
}
}
})?;
let module = db.module(file);
let source_map = db.source_map(file);
let name_id = source_map.name_for_node(ptr)?;
let name_str = &*module[name_id].text;
if explicit_inputs.get(name_str) == Some(&name_id)
|| param_inputs.get(name_str) == Some(&name_id)
{
let target = flake_info
.input_store_paths
.get(name_str)?
.join(FLAKE_FILE)?;
return Some(GotoDefinitionResult::Path(target));
}
None
}
#[cfg(test)]
mod tests {
use super::*;
use crate::base::SourceDatabase;
use crate::tests::TestDB;
use expect_test::{expect, Expect};
#[track_caller]
fn check_no(fixture: &str) {
let (db, f) = TestDB::from_fixture(fixture).unwrap();
assert_eq!(f.markers().len(), 1, "Missing markers");
assert_eq!(goto_definition(&db, f[0]), None);
}
#[track_caller]
fn check(fixture: &str, expect: Expect) {
let (db, f) = TestDB::from_fixture(fixture).unwrap();
assert_eq!(f.markers().len(), 1, "Missing markers");
let mut got = match goto_definition(&db, f[0]).expect("No definition") {
GotoDefinitionResult::Path(path) => format!("file://{}", path.display()),
GotoDefinitionResult::Targets(targets) => {
assert!(!targets.is_empty());
targets
.into_iter()
.map(|target| {
assert!(target.full_range.contains_range(target.focus_range));
let src = db.file_content(target.file_id);
let mut full = src[target.full_range].to_owned();
let relative_focus = target.focus_range - target.full_range.start();
full.insert(relative_focus.end().into(), '>');
full.insert(relative_focus.start().into(), '<');
full
})
.collect::<Vec<_>>()
.join("\n")
}
};
// Prettify.
if got.contains('\n') {
got += "\n";
}
expect.assert_eq(&got);
}
#[test]
fn not_found() {
check_no("$0a");
check_no("b: $0a");
}
#[test]
fn invalid_position() {
check_no("1 $0+ 2");
check_no("wi$0th 1; 2");
}
#[test]
fn lambda_param() {
check("a: (a: (a $0a)) 1", expect!["<a>: (a a)"]);
check("x: (a: (a $0x)) 1", expect!["<x>: (a: (a x)) 1"]);
check("a: (a@{ x }: (a $0a)) 1", expect!["<a>@{ x }: (a a)"]);
check("a: ({ x ? $0a }@a: a) 1", expect!["{ x ? a }@<a>: a"]);
check("a: ({ x ? $0x }@a: a) 1", expect!["{ <x> ? x }@a: a"]);
}
#[test]
fn with_env() {
check("with 1; let a = 1; in with 2; $0a", expect!["<a> = 1;"]);
check(
"with 1; let a = 1; in with 2; $0b",
expect![[r#"
<with> 2;
<with> 1;
"#]],
);
}
#[test]
fn bindings() {
check(
"let a = a; in rec { inherit a; b = $0a; }", | expect!["<a> = a;"],
);
check(
"let a = $0a; in rec { inherit a; b = a; }",
expect!["<a> = a;"],
);
}
#[test]
fn left_and_right() {
check("let a = 1; in $0a ", expect!["<a> = 1;"]);
check("let a = 1; in a$0 ", expect!["<a> = 1;"]);
check("let a = 1; in 0+$0a+0", expect!["<a> = 1;"]);
check("let a = 1; in 0+a$0+0", expect!["<a> = 1;"]);
}
#[test]
fn merged_binding() {
check(
"let a.a = 1; a.b = 2; a = { c = 3; }; in $0a",
expect![[r#"
<a>.a = 1;
<a>.b = 2;
<a> = { c = 3; };
"#]],
);
check(
"rec { b = $0a; a = { a = 1; }; a = { a = 2; }; }",
expect![[r#"
<a> = { a = 1; };
<a> = { a = 2; };
"#]],
);
}
#[test]
fn builtin() {
check("let true = 1; in $0true && false", expect!["<true> = 1;"]);
check_no("let true = 1; in true && $0false");
}
#[test]
fn path() {
check("1 + $0./.", expect!["file:///"]);
check(
"
#- /default.nix
import $0./bar.nix
#- /bar.nix
hello
",
expect!["file:///bar.nix"],
);
}
#[test]
fn flake_input() {
check(
| expect!["inherit <a>;"],
);
check(
"let a = a; in rec { inherit $0a; b = a; }", | random_line_split |
sink_test.go | , cancel := context.WithTimeout(ctx, time.Millisecond)
defer cancel()
if err := sink.Flush(timeoutCtx); !testutils.IsError(
err, `context deadline exceeded`,
) {
t.Fatalf(`expected "context deadline exceeded" error got: %+v`, err)
}
}
go func() { p.successesCh <- m1 }()
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
// Check no inflight again now that we've sent something
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
// Mixed success and error.
if err := sink.EmitRow(ctx, topic(`t`), []byte(`2`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m2 := <-p.inputCh
if err := sink.EmitRow(ctx, topic(`t`), []byte(`3`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m3 := <-p.inputCh
if err := sink.EmitRow(ctx, topic(`t`), []byte(`4`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m4 := <-p.inputCh
go func() { p.successesCh <- m2 }()
go func() {
p.errorsCh <- &sarama.ProducerError{
Msg: m3,
Err: errors.New("m3"),
}
}()
go func() { p.successesCh <- m4 }()
if err := sink.Flush(ctx); !testutils.IsError(err, `m3`) {
t.Fatalf(`expected "m3" error got: %+v`, err)
}
// Check simple success again after error
if err := sink.EmitRow(ctx, topic(`t`), []byte(`5`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m5 := <-p.inputCh
go func() { p.successesCh <- m5 }()
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
}
func TestKafkaSinkEscaping(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
p := newAsyncProducerMock(1)
sink, cleanup := makeTestKafkaSink(
t, noTopicPrefix, defaultTopicName, p, memoryUnlimited, `☃`)
defer cleanup()
if err := sink.EmitRow(ctx, topic(`☃`), []byte(`k☃`), []byte(`v☃`), zeroTS); err != nil {
t.Fatal(err)
}
m := <-p.inputCh
require.Equal(t, `_u2603_`, m.Topic)
require.Equal(t, sarama.ByteEncoder(`k☃`), m.Key)
require.Equal(t, sarama.ByteEncoder(`v☃`), m.Value)
}
func TestKafkaTopicNameProvided(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
const topicOverride = "general"
p := newAsyncProducerMock(1)
sink, cleanup := makeTestKafkaSink(
t, noTopicPrefix, topicOverride, p, memoryUnlimited, "particular0", "particular1")
defer cleanup()
//all messages go to the general topic
require.NoError(t, sink.EmitRow(ctx, topic("particular0"), []byte(`k☃`), []byte(`v☃`), zeroTS))
m := <-p.inputCh
require.Equal(t, topicOverride, m.Topic)
}
func TestKafkaTopicNameWithPrefix(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
p := newAsyncProducerMock(1)
const topicPrefix = "prefix-"
const topicOverride = "☃"
sink, clenaup := makeTestKafkaSink(
t, topicPrefix, topicOverride, p, memoryUnlimited, "particular0", "particular1")
defer clenaup()
//the prefix is applied and the name is escaped
require.NoError(t, sink.EmitRow(ctx, topic("particular0"), []byte(`k☃`), []byte(`v☃`), zeroTS))
m := <-p.inputCh
require.Equal(t, `prefix-_u2603_`, m.Topic)
}
// goos: darwin
// goarch: amd64
// pkg: github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl
// cpu: Intel(R) Core(TM) i9-9980HK CPU @ 2.40GHz
// BenchmarkEmitRow-16 573620 1779 ns/op 235 B/op 6 allocs/op
func BenchmarkEmitRow(b *testing.B) {
defer leaktest.AfterTest(b)()
defer log.Scope(b).Close(b)
ctx := context.Background()
p := newAsyncProducerMock(unbuffered)
const tableName = `defaultdb.public.funky_table☃`
topic := topic(tableName)
sink, cleanup := makeTestKafkaSink(b, noTopicPrefix, defaultTopicName, p, memoryUnlimited, tableName)
stopConsume := p.consumeAndSucceed()
defer func() {
stopConsume()
cleanup()
}()
b.ResetTimer()
for i := 0; i < b.N; i++ {
require.NoError(b, sink.EmitRow(ctx, topic, []byte(`k☃`), []byte(`v☃`), hlc.Timestamp{}))
}
b.ReportAllocs()
}
type testEncoder struct{}
func (testEncoder) EncodeKey(context.Context, encodeRow) ([]byte, error) { panic(`unimplemented`) }
func (testEncoder) EncodeValue(context.Context, encodeRow) ([]byte, error) { panic(`unimplemented`) }
func (testEncoder) EncodeResolvedTimestamp(
_ context.Context, _ string, ts hlc.Timestamp,
) ([]byte, error) {
return []byte(ts.String()), nil
}
func TestSQLSink(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
overrideTopic := func(name string) tableDescriptorTopic {
id, _ := strconv.ParseUint(name, 36, 64)
return tableDescriptorTopic{
tabledesc.NewBuilder(&descpb.TableDescriptor{Name: name, ID: descpb.ID(id)}).BuildImmutableTable()}
}
ctx := context.Background()
s, sqlDBRaw, _ := serverutils.StartServer(t, base.TestServerArgs{UseDatabase: "d"})
defer s.Stopper().Stop(ctx)
sqlDB := sqlutils.MakeSQLRunner(sqlDBRaw)
sqlDB.Exec(t, `CREATE DATABASE d`)
sinkURL, cleanup := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(security.RootUser))
defer cleanup()
sinkURL.Path = `d`
fooTopic := overrideTopic(`foo`)
barTopic := overrideTopic(`bar`)
targets := jobspb.ChangefeedTargets{
fooTopic.GetID(): jobspb.ChangefeedTarget{StatementTimeName: `foo`},
barTopic.GetID(): jobspb.ChangefeedTarget{StatementTimeName: `bar`},
}
sink, err := makeSQLSink(sinkURL.String(), `sink`, targets)
require.NoError(t, err)
defer func() { require.NoError(t, sink.Close()) }()
// Empty
require.NoError(t, sink.Flush(ctx))
// Undeclared topic
require.EqualError(t,
sink.EmitRow(ctx, overrideTopic(`nope`), nil, nil, zeroTS), `cannot emit to undeclared topic: `)
// With one row, nothing flushes until Flush is called.
require.NoError(t, sink.EmitRow(ctx, fooTopic, []byte(`k1`), []byte(`v0`), zeroTS))
sqlDB.CheckQueryResults(t, `SELECT key, value FROM sink ORDER BY PRIMARY KEY sink`,
[][]string{},
)
require.NoError(t, sink.Flush(ctx))
sqlDB.CheckQueryResults(t, `SELECT key, value FROM sink ORDER BY PRIMARY KEY sink`,
[][]string{{`k1`, `v0`}},
)
sqlDB.Exec(t, `TRUNCATE sink`)
// Verify the implicit flushing
sqlDB.CheckQueryResults(t, `SELECT count(*) FROM sink`, [][]string{{`0`}})
for i := 0; i < sqlSinkRowBatchSize+1; i++ {
require.NoError(t,
sink.EmitRow(ctx, fooTopic, []byte(`k1`), []byte(`v`+strconv.Itoa(i)), zeroTS))
}
// Should have auto flushed after sqlSinkRowBatchSize
sqlDB.CheckQueryResults(t, `SELECT count(*) FROM sink`, [][]string{{`3`}})
require.NoError(t, sink.Flush(ctx))
sqlDB.CheckQueryResults(t, `SELECT count(*) FROM sink`, [][]string{{`4`}})
sqlDB.Exec(t, `TRUNCATE sink`)
// Two tables interleaved in time
require.NoError(t, sink.EmitRow(ctx, fooTopic, []byte(`kfoo`), []byte(`v0`), zeroTS)) | require.NoError(t, sink.EmitRow(ctx, barTopic, []byte(`kbar`), []byte(`v0`), zeroTS))
require.NoError(t, sink.EmitRow(ctx, fooTopic, []byte(`kfoo`), []byte(`v1`), zeroTS)) | random_line_split | |
sink_test.go |
func (p *asyncProducerMock) Close() error {
close(p.inputCh)
close(p.successesCh)
close(p.errorsCh)
return nil
}
// consumeAndSucceed consumes input messages and sends them to successes channel.
// Returns function that must be called to stop this consumer
// to clean up. The cleanup function must be called before closing asyncProducerMock.
func (p *asyncProducerMock) consumeAndSucceed() (cleanup func()) {
var wg sync.WaitGroup
wg.Add(1)
done := make(chan struct{})
go func() {
defer wg.Done()
for {
select {
case <-done:
return
case m := <-p.inputCh:
p.successesCh <- m
}
}
}()
return func() {
close(done)
wg.Wait()
}
}
// consume consumes input messages but does not acknowledge neither successes, nor errors.
// In essence, this simulates an unreachable kafka sink.
// Use acknowledge methods to acknowledge successes or errors.
// Returns a function that must be called to stop this consumer
// to clean up. The cleanup function must be called before closing asyncProducerMock.
func (p *asyncProducerMock) consume() (cleanup func()) {
var wg sync.WaitGroup
wg.Add(1)
done := make(chan struct{})
go func() {
defer wg.Done()
for {
select {
case <-done:
return
case m := <-p.inputCh:
p.mu.Lock()
p.mu.outstanding = append(p.mu.outstanding, m)
p.mu.Unlock()
}
}
}()
return func() {
close(done)
wg.Wait()
}
}
// acknowledge sends acknowledgements on the specified channel
// for each of the outstanding messages.
func (p *asyncProducerMock) acknowledge(n int, ch chan *sarama.ProducerMessage) {
for n > 0 {
var outstanding []*sarama.ProducerMessage
p.mu.Lock()
outstanding = append(outstanding, p.mu.outstanding...)
p.mu.outstanding = p.mu.outstanding[:0]
p.mu.Unlock()
for _, m := range outstanding {
ch <- m
}
n -= len(outstanding)
}
}
// outstanding returns the number of un-acknowledged messages.
func (p *asyncProducerMock) outstanding() int {
p.mu.Lock()
defer p.mu.Unlock()
return len(p.mu.outstanding)
}
func topic(name string) tableDescriptorTopic {
return tableDescriptorTopic{tabledesc.NewBuilder(&descpb.TableDescriptor{Name: name}).BuildImmutableTable()}
}
const memoryUnlimited int64 = math.MaxInt64
const noTopicPrefix = ""
const defaultTopicName = ""
func getBoundAccountWithBudget(budget int64) (account mon.BoundAccount, cleanup func()) {
mm := mon.NewMonitorWithLimit(
"test-mm", mon.MemoryResource, budget,
nil, nil, mon.DefaultPoolAllocationSize, 100,
cluster.MakeTestingClusterSettings())
mm.Start(context.Background(), nil, mon.MakeStandaloneBudget(budget))
return mm.MakeBoundAccount(), func() { mm.Stop(context.Background()) }
}
func makeTestKafkaSink(
t testing.TB,
topicPrefix string,
topicNameOverride string,
p sarama.AsyncProducer,
budget int64,
targetNames ...string,
) (s *kafkaSink, cleanup func()) {
mem, release := getBoundAccountWithBudget(budget)
targets := makeChangefeedTargets(targetNames...)
s = &kafkaSink{
ctx: context.Background(),
topics: makeTopicsMap(topicPrefix, topicNameOverride, targets),
producer: p,
}
s.mu.mem = mem
s.start()
return s, func() {
require.NoError(t, s.Close())
release()
}
}
func makeChangefeedTargets(targetNames ...string) jobspb.ChangefeedTargets {
targets := make(jobspb.ChangefeedTargets, len(targetNames))
for i, name := range targetNames {
targets[descpb.ID(i)] = jobspb.ChangefeedTarget{StatementTimeName: name}
}
return targets
}
func TestKafkaSink(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
p := newAsyncProducerMock(1)
sink, cleanup := makeTestKafkaSink(
t, noTopicPrefix, defaultTopicName, p, memoryUnlimited, "t")
defer cleanup()
// No inflight
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
// Timeout
if err := sink.EmitRow(ctx, topic(`t`), []byte(`1`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m1 := <-p.inputCh
for i := 0; i < 2; i++ {
timeoutCtx, cancel := context.WithTimeout(ctx, time.Millisecond)
defer cancel()
if err := sink.Flush(timeoutCtx); !testutils.IsError(
err, `context deadline exceeded`,
) {
t.Fatalf(`expected "context deadline exceeded" error got: %+v`, err)
}
}
go func() { p.successesCh <- m1 }()
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
// Check no inflight again now that we've sent something
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
// Mixed success and error.
if err := sink.EmitRow(ctx, topic(`t`), []byte(`2`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m2 := <-p.inputCh
if err := sink.EmitRow(ctx, topic(`t`), []byte(`3`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m3 := <-p.inputCh
if err := sink.EmitRow(ctx, topic(`t`), []byte(`4`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m4 := <-p.inputCh
go func() { p.successesCh <- m2 }()
go func() {
p.errorsCh <- &sarama.ProducerError{
Msg: m3,
Err: errors.New("m3"),
}
}()
go func() { p.successesCh <- m4 }()
if err := sink.Flush(ctx); !testutils.IsError(err, `m3`) {
t.Fatalf(`expected "m3" error got: %+v`, err)
}
// Check simple success again after error
if err := sink.EmitRow(ctx, topic(`t`), []byte(`5`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m5 := <-p.inputCh
go func() { p.successesCh <- m5 }()
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
}
func TestKafkaSinkEscaping(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
p := newAsyncProducerMock(1)
sink, cleanup := makeTestKafkaSink(
t, noTopicPrefix, defaultTopicName, p, memoryUnlimited, `☃`)
defer cleanup()
if err := sink.EmitRow(ctx, topic(`☃`), []byte(`k☃`), []byte(`v☃`), zeroTS); err != nil {
t.Fatal(err)
}
m := <-p.inputCh
require.Equal(t, `_u2603_`, m.Topic)
require.Equal(t, sarama.ByteEncoder(`k☃`), m.Key)
require.Equal(t, sarama.ByteEncoder(`v☃`), m.Value)
}
func TestKafkaTopicNameProvided(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
const topicOverride = "general"
p := newAsyncProducerMock(1)
sink, cleanup := makeTestKafkaSink(
t, noTopicPrefix, topicOverride, p, memoryUnlimited, "particular0", "particular1")
defer cleanup()
//all messages go to the general topic
require.NoError(t, sink.EmitRow(ctx, topic("particular0"), []byte(`k☃`), []byte(`v☃`), zeroTS))
m := <-p.inputCh
require.Equal(t, topicOverride, m.Topic)
}
func TestKafkaTopicNameWithPrefix(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
p := newAsyncProducerMock(1)
const topicPrefix = "prefix-"
const topicOverride = "☃"
sink, clenaup := makeTestKafkaSink(
t, topicPrefix, topicOverride, p, memoryUnlimited, "particular0", "particular1")
defer clenaup()
//the prefix is applied and the name is escaped
require.NoError(t, sink.EmitRow(ctx, topic("particular0"), []byte(`k☃`), []byte(`v☃`), zeroTS))
m := <-p.inputCh
require.Equal(t, `prefix-_u2603_`, m.Topic)
}
// goos: darwin
// goarch: amd64
// pkg: github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl
// cpu: | { panic(`unimplemented`) } | identifier_body | |
sink_test.go | p *asyncProducerMock) acknowledge(n int, ch chan *sarama.ProducerMessage) {
for n > 0 {
var outstanding []*sarama.ProducerMessage
p.mu.Lock()
outstanding = append(outstanding, p.mu.outstanding...)
p.mu.outstanding = p.mu.outstanding[:0]
p.mu.Unlock()
for _, m := range outstanding |
n -= len(outstanding)
}
}
// outstanding returns the number of un-acknowledged messages.
func (p *asyncProducerMock) outstanding() int {
p.mu.Lock()
defer p.mu.Unlock()
return len(p.mu.outstanding)
}
func topic(name string) tableDescriptorTopic {
return tableDescriptorTopic{tabledesc.NewBuilder(&descpb.TableDescriptor{Name: name}).BuildImmutableTable()}
}
const memoryUnlimited int64 = math.MaxInt64
const noTopicPrefix = ""
const defaultTopicName = ""
func getBoundAccountWithBudget(budget int64) (account mon.BoundAccount, cleanup func()) {
mm := mon.NewMonitorWithLimit(
"test-mm", mon.MemoryResource, budget,
nil, nil, mon.DefaultPoolAllocationSize, 100,
cluster.MakeTestingClusterSettings())
mm.Start(context.Background(), nil, mon.MakeStandaloneBudget(budget))
return mm.MakeBoundAccount(), func() { mm.Stop(context.Background()) }
}
func makeTestKafkaSink(
t testing.TB,
topicPrefix string,
topicNameOverride string,
p sarama.AsyncProducer,
budget int64,
targetNames ...string,
) (s *kafkaSink, cleanup func()) {
mem, release := getBoundAccountWithBudget(budget)
targets := makeChangefeedTargets(targetNames...)
s = &kafkaSink{
ctx: context.Background(),
topics: makeTopicsMap(topicPrefix, topicNameOverride, targets),
producer: p,
}
s.mu.mem = mem
s.start()
return s, func() {
require.NoError(t, s.Close())
release()
}
}
func makeChangefeedTargets(targetNames ...string) jobspb.ChangefeedTargets {
targets := make(jobspb.ChangefeedTargets, len(targetNames))
for i, name := range targetNames {
targets[descpb.ID(i)] = jobspb.ChangefeedTarget{StatementTimeName: name}
}
return targets
}
func TestKafkaSink(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
p := newAsyncProducerMock(1)
sink, cleanup := makeTestKafkaSink(
t, noTopicPrefix, defaultTopicName, p, memoryUnlimited, "t")
defer cleanup()
// No inflight
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
// Timeout
if err := sink.EmitRow(ctx, topic(`t`), []byte(`1`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m1 := <-p.inputCh
for i := 0; i < 2; i++ {
timeoutCtx, cancel := context.WithTimeout(ctx, time.Millisecond)
defer cancel()
if err := sink.Flush(timeoutCtx); !testutils.IsError(
err, `context deadline exceeded`,
) {
t.Fatalf(`expected "context deadline exceeded" error got: %+v`, err)
}
}
go func() { p.successesCh <- m1 }()
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
// Check no inflight again now that we've sent something
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
// Mixed success and error.
if err := sink.EmitRow(ctx, topic(`t`), []byte(`2`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m2 := <-p.inputCh
if err := sink.EmitRow(ctx, topic(`t`), []byte(`3`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m3 := <-p.inputCh
if err := sink.EmitRow(ctx, topic(`t`), []byte(`4`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m4 := <-p.inputCh
go func() { p.successesCh <- m2 }()
go func() {
p.errorsCh <- &sarama.ProducerError{
Msg: m3,
Err: errors.New("m3"),
}
}()
go func() { p.successesCh <- m4 }()
if err := sink.Flush(ctx); !testutils.IsError(err, `m3`) {
t.Fatalf(`expected "m3" error got: %+v`, err)
}
// Check simple success again after error
if err := sink.EmitRow(ctx, topic(`t`), []byte(`5`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m5 := <-p.inputCh
go func() { p.successesCh <- m5 }()
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
}
func TestKafkaSinkEscaping(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
p := newAsyncProducerMock(1)
sink, cleanup := makeTestKafkaSink(
t, noTopicPrefix, defaultTopicName, p, memoryUnlimited, `☃`)
defer cleanup()
if err := sink.EmitRow(ctx, topic(`☃`), []byte(`k☃`), []byte(`v☃`), zeroTS); err != nil {
t.Fatal(err)
}
m := <-p.inputCh
require.Equal(t, `_u2603_`, m.Topic)
require.Equal(t, sarama.ByteEncoder(`k☃`), m.Key)
require.Equal(t, sarama.ByteEncoder(`v☃`), m.Value)
}
func TestKafkaTopicNameProvided(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
const topicOverride = "general"
p := newAsyncProducerMock(1)
sink, cleanup := makeTestKafkaSink(
t, noTopicPrefix, topicOverride, p, memoryUnlimited, "particular0", "particular1")
defer cleanup()
//all messages go to the general topic
require.NoError(t, sink.EmitRow(ctx, topic("particular0"), []byte(`k☃`), []byte(`v☃`), zeroTS))
m := <-p.inputCh
require.Equal(t, topicOverride, m.Topic)
}
func TestKafkaTopicNameWithPrefix(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
p := newAsyncProducerMock(1)
const topicPrefix = "prefix-"
const topicOverride = "☃"
sink, clenaup := makeTestKafkaSink(
t, topicPrefix, topicOverride, p, memoryUnlimited, "particular0", "particular1")
defer clenaup()
//the prefix is applied and the name is escaped
require.NoError(t, sink.EmitRow(ctx, topic("particular0"), []byte(`k☃`), []byte(`v☃`), zeroTS))
m := <-p.inputCh
require.Equal(t, `prefix-_u2603_`, m.Topic)
}
// goos: darwin
// goarch: amd64
// pkg: github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl
// cpu: Intel(R) Core(TM) i9-9980HK CPU @ 2.40GHz
// BenchmarkEmitRow-16 573620 1779 ns/op 235 B/op 6 allocs/op
func BenchmarkEmitRow(b *testing.B) {
defer leaktest.AfterTest(b)()
defer log.Scope(b).Close(b)
ctx := context.Background()
p := newAsyncProducerMock(unbuffered)
const tableName = `defaultdb.public.funky_table☃`
topic := topic(tableName)
sink, cleanup := makeTestKafkaSink(b, noTopicPrefix, defaultTopicName, p, memoryUnlimited, tableName)
stopConsume := p.consumeAndSucceed()
defer func() {
stopConsume()
cleanup()
}()
b.ResetTimer()
for i := 0; i < b.N; i++ {
require.NoError(b, sink.EmitRow(ctx, topic, []byte(`k☃`), []byte(`v☃`), hlc.Timestamp{}))
}
b.ReportAllocs()
}
type testEncoder struct{}
func (testEncoder) EncodeKey(context.Context, encodeRow) ([]byte, error) { panic(`unimplemented`) }
func (testEncoder) EncodeValue(context.Context, encodeRow) ([]byte, error) { panic(`unimplemented`) }
func (testEncoder) EncodeResolvedTimestamp(
_ context.Context, _ string, ts hlc.Timestamp,
) ([]byte, error) {
return []byte(ts.String()), nil
}
func TestSQLSink(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
overrideTopic := func(name string) tableDescriptorTopic {
id, _ := strconv.ParseUint(name, 36, 6 | {
ch <- m
} | conditional_block |
sink_test.go | p *asyncProducerMock) acknowledge(n int, ch chan *sarama.ProducerMessage) {
for n > 0 {
var outstanding []*sarama.ProducerMessage
p.mu.Lock()
outstanding = append(outstanding, p.mu.outstanding...)
p.mu.outstanding = p.mu.outstanding[:0]
p.mu.Unlock()
for _, m := range outstanding {
ch <- m
}
n -= len(outstanding)
}
}
// outstanding returns the number of un-acknowledged messages.
func (p *asyncProducerMock) | () int {
p.mu.Lock()
defer p.mu.Unlock()
return len(p.mu.outstanding)
}
func topic(name string) tableDescriptorTopic {
return tableDescriptorTopic{tabledesc.NewBuilder(&descpb.TableDescriptor{Name: name}).BuildImmutableTable()}
}
const memoryUnlimited int64 = math.MaxInt64
const noTopicPrefix = ""
const defaultTopicName = ""
func getBoundAccountWithBudget(budget int64) (account mon.BoundAccount, cleanup func()) {
mm := mon.NewMonitorWithLimit(
"test-mm", mon.MemoryResource, budget,
nil, nil, mon.DefaultPoolAllocationSize, 100,
cluster.MakeTestingClusterSettings())
mm.Start(context.Background(), nil, mon.MakeStandaloneBudget(budget))
return mm.MakeBoundAccount(), func() { mm.Stop(context.Background()) }
}
func makeTestKafkaSink(
t testing.TB,
topicPrefix string,
topicNameOverride string,
p sarama.AsyncProducer,
budget int64,
targetNames ...string,
) (s *kafkaSink, cleanup func()) {
mem, release := getBoundAccountWithBudget(budget)
targets := makeChangefeedTargets(targetNames...)
s = &kafkaSink{
ctx: context.Background(),
topics: makeTopicsMap(topicPrefix, topicNameOverride, targets),
producer: p,
}
s.mu.mem = mem
s.start()
return s, func() {
require.NoError(t, s.Close())
release()
}
}
func makeChangefeedTargets(targetNames ...string) jobspb.ChangefeedTargets {
targets := make(jobspb.ChangefeedTargets, len(targetNames))
for i, name := range targetNames {
targets[descpb.ID(i)] = jobspb.ChangefeedTarget{StatementTimeName: name}
}
return targets
}
func TestKafkaSink(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
p := newAsyncProducerMock(1)
sink, cleanup := makeTestKafkaSink(
t, noTopicPrefix, defaultTopicName, p, memoryUnlimited, "t")
defer cleanup()
// No inflight
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
// Timeout
if err := sink.EmitRow(ctx, topic(`t`), []byte(`1`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m1 := <-p.inputCh
for i := 0; i < 2; i++ {
timeoutCtx, cancel := context.WithTimeout(ctx, time.Millisecond)
defer cancel()
if err := sink.Flush(timeoutCtx); !testutils.IsError(
err, `context deadline exceeded`,
) {
t.Fatalf(`expected "context deadline exceeded" error got: %+v`, err)
}
}
go func() { p.successesCh <- m1 }()
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
// Check no inflight again now that we've sent something
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
// Mixed success and error.
if err := sink.EmitRow(ctx, topic(`t`), []byte(`2`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m2 := <-p.inputCh
if err := sink.EmitRow(ctx, topic(`t`), []byte(`3`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m3 := <-p.inputCh
if err := sink.EmitRow(ctx, topic(`t`), []byte(`4`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m4 := <-p.inputCh
go func() { p.successesCh <- m2 }()
go func() {
p.errorsCh <- &sarama.ProducerError{
Msg: m3,
Err: errors.New("m3"),
}
}()
go func() { p.successesCh <- m4 }()
if err := sink.Flush(ctx); !testutils.IsError(err, `m3`) {
t.Fatalf(`expected "m3" error got: %+v`, err)
}
// Check simple success again after error
if err := sink.EmitRow(ctx, topic(`t`), []byte(`5`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m5 := <-p.inputCh
go func() { p.successesCh <- m5 }()
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
}
func TestKafkaSinkEscaping(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
p := newAsyncProducerMock(1)
sink, cleanup := makeTestKafkaSink(
t, noTopicPrefix, defaultTopicName, p, memoryUnlimited, `☃`)
defer cleanup()
if err := sink.EmitRow(ctx, topic(`☃`), []byte(`k☃`), []byte(`v☃`), zeroTS); err != nil {
t.Fatal(err)
}
m := <-p.inputCh
require.Equal(t, `_u2603_`, m.Topic)
require.Equal(t, sarama.ByteEncoder(`k☃`), m.Key)
require.Equal(t, sarama.ByteEncoder(`v☃`), m.Value)
}
func TestKafkaTopicNameProvided(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
const topicOverride = "general"
p := newAsyncProducerMock(1)
sink, cleanup := makeTestKafkaSink(
t, noTopicPrefix, topicOverride, p, memoryUnlimited, "particular0", "particular1")
defer cleanup()
//all messages go to the general topic
require.NoError(t, sink.EmitRow(ctx, topic("particular0"), []byte(`k☃`), []byte(`v☃`), zeroTS))
m := <-p.inputCh
require.Equal(t, topicOverride, m.Topic)
}
func TestKafkaTopicNameWithPrefix(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
p := newAsyncProducerMock(1)
const topicPrefix = "prefix-"
const topicOverride = "☃"
sink, clenaup := makeTestKafkaSink(
t, topicPrefix, topicOverride, p, memoryUnlimited, "particular0", "particular1")
defer clenaup()
//the prefix is applied and the name is escaped
require.NoError(t, sink.EmitRow(ctx, topic("particular0"), []byte(`k☃`), []byte(`v☃`), zeroTS))
m := <-p.inputCh
require.Equal(t, `prefix-_u2603_`, m.Topic)
}
// goos: darwin
// goarch: amd64
// pkg: github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl
// cpu: Intel(R) Core(TM) i9-9980HK CPU @ 2.40GHz
// BenchmarkEmitRow-16 573620 1779 ns/op 235 B/op 6 allocs/op
func BenchmarkEmitRow(b *testing.B) {
defer leaktest.AfterTest(b)()
defer log.Scope(b).Close(b)
ctx := context.Background()
p := newAsyncProducerMock(unbuffered)
const tableName = `defaultdb.public.funky_table☃`
topic := topic(tableName)
sink, cleanup := makeTestKafkaSink(b, noTopicPrefix, defaultTopicName, p, memoryUnlimited, tableName)
stopConsume := p.consumeAndSucceed()
defer func() {
stopConsume()
cleanup()
}()
b.ResetTimer()
for i := 0; i < b.N; i++ {
require.NoError(b, sink.EmitRow(ctx, topic, []byte(`k☃`), []byte(`v☃`), hlc.Timestamp{}))
}
b.ReportAllocs()
}
type testEncoder struct{}
func (testEncoder) EncodeKey(context.Context, encodeRow) ([]byte, error) { panic(`unimplemented`) }
func (testEncoder) EncodeValue(context.Context, encodeRow) ([]byte, error) { panic(`unimplemented`) }
func (testEncoder) EncodeResolvedTimestamp(
_ context.Context, _ string, ts hlc.Timestamp,
) ([]byte, error) {
return []byte(ts.String()), nil
}
func TestSQLSink(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
overrideTopic := func(name string) tableDescriptorTopic {
id, _ := strconv.ParseUint(name, 36, 6 | outstanding | identifier_name |
amqp.rs | 020-2021, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// #![cfg_attr(coverage, no_coverage)]
//! # AMQP Offramp
//!
//! The `amqp` offramp allows producing events to an amqp broker.
use crate::channel::{bounded, Receiver};
use crate::sink::prelude::*;
use halfbrown::HashMap;
use lapin::{
options::BasicPublishOptions, publisher_confirm::Confirmation, BasicProperties, Channel,
Connection, ConnectionProperties, PromiseChain,
};
use serde::Deserialize;
use std::{fmt, time::Instant};
use tremor_common::url::TremorUrl;
#[derive(Deserialize, Debug, Clone)]
pub(crate) struct Config {
pub(crate) amqp_addr: String,
#[serde(default = "Default::default")]
routing_key: String,
#[serde(default = "Default::default")]
exchange: String,
publish_options: BasicPublishOptions,
// headers to use for the messages
#[serde(default = "Default::default")]
pub(crate) headers: HashMap<String, Vec<String>>,
}
impl Config {
async fn channel(&self) -> PromiseChain<Channel> {
match Connection::connect(&self.amqp_addr, ConnectionProperties::default()).await {
Ok(connection) => connection.create_channel(),
Err(error) => PromiseChain::new_with_data(Err(error)),
}
}
}
impl ConfigImpl for Config {}
/// Amqp offramp connector
pub(crate) struct Amqp {
sink_url: TremorUrl,
config: Config,
postprocessors: Postprocessors,
reply_channel: Sender<sink::Reply>,
channel: Option<Channel>,
error_rx: Receiver<()>,
error_tx: Sender<()>,
}
impl fmt::Debug for Amqp {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"[Sink::{}] RoutingKey: {}",
&self.sink_url, self.config.routing_key
)
}
}
pub(crate) struct Builder {}
impl offramp::Builder for Builder {
fn from_config(&self, config: &Option<OpConfig>) -> Result<Box<dyn Offramp>> {
if let Some(config) = config {
let config: Config = Config::new(config)?;
let (dummy_tx, _) = bounded(1);
let (error_tx, error_rx) = bounded(qsize());
Ok(SinkManager::new_box(Amqp {
sink_url: TremorUrl::from_offramp_id("amqp")?, // dummy
config,
postprocessors: vec![],
reply_channel: dummy_tx,
channel: None,
error_rx,
error_tx,
}))
} else {
Err("Amqp offramp requires a config".into())
}
}
}
impl Amqp {
async fn handle_channel(&mut self) -> Result<Option<&Channel>> {
while let Ok(()) = self.error_rx.try_recv() {
self.channel = None;
}
if self.channel.is_none() {
match self.config.channel().await.await {
Ok(channel) => self.channel = Some(channel),
Err(error) => return Err(error.into()),
}
}
return Ok(self.channel.as_ref());
}
}
#[async_trait::async_trait]
impl Sink for Amqp {
async fn on_event(
&mut self,
_input: &str,
codec: &mut dyn Codec,
_codec_map: &HashMap<String, Box<dyn Codec>>,
event: Event,
) -> ResultVec {
self.handle_channel().await?;
let ingest_ns = event.ingest_ns;
let processing_start = Instant::now();
/*
// evaluate here to avoid borrowing again while borrowed.
let config_reply = self.config.reply.as_deref();
let op_meta = &event.op_meta;
self.merged_meta.merge(op_meta.clone());
*/
let insight_event = event.insight_ack();
if let Some(channel) = &mut self.channel {
for (value, _) in event.value_meta_iter() {
let encoded = codec.encode(value)?;
let processed =
postprocess(self.postprocessors.as_mut_slice(), ingest_ns, encoded)?;
//let headers = meta.get("nats").and_then(|v| v.get_object("headers"));
for payload in processed {
/*
// prepare message reply
let message_reply = reply.or(config_reply);
*/
// prepare message headers
let properties = BasicProperties::default();
/*
let mut key_val: Vec<(&str, &str)> = Vec::with_capacity(
self.config.headers.len() + headers.map(HashMap::len).unwrap_or_default(),
);
for (key, val) in &self.config.headers {
for ele in val.iter() {
key_val.push((key.as_str(), ele.as_str()));
}
}
if let Some(headers) = headers {
for (key, val) in
headers.iter().filter_map(|(k, v)| Some((k, v.as_array()?)))
{
for ele in val.iter().filter_map(value_trait::ValueAccess::as_str) {
key_val.push((key, ele));
}
}
}
let message_headers = if key_val.is_empty() {
None
} else {
Some(Headers::from_iter(key_val))
};
*/
let publish_result = channel
.basic_publish(
self.config.exchange.as_str(),
self.config.routing_key.as_str(),
self.config.publish_options,
payload,
properties,
)
.await?
.await?;
match publish_result {
Confirmation::NotRequested | Confirmation::Ack(_) => {
if event.transactional {
let mut insight = insight_event.clone();
insight.cb = CbAction::Ack;
// we hopefully enver wait more then u64 ... if we do we got
// bigger problems
#[allow(clippy::cast_possible_truncation)]
let time = processing_start.elapsed().as_millis() as u64;
let mut m = Object::with_capacity(1);
m.insert("time".into(), time.into());
insight.data = (Value::null(), m).into();
self.reply_channel
.send(sink::Reply::Insight(insight.clone()))
.await?;
}
}
Confirmation::Nack(err) => {
if let Some(e) = err {
error!(
"[Sink::{}] failed to send message: {} {}",
&self.sink_url, e.reply_code, e.reply_text
);
} else {
error!(
"[Sink::{}] failed to send message: unknown error",
&self.sink_url
);
}
if self.error_tx.send(()).await.is_err() {
error!(
"[Sink::{}] Error notifying the system about amqp error",
&self.sink_url
);
}
if event.transactional {
let mut insight = insight_event.clone();
insight.cb = CbAction::Fail;
self.reply_channel
.send(sink::Reply::Response(ERR, insight))
.await?;
}
}
}
}
}
}
Ok(Vec::new())
}
fn default_codec(&self) -> &str {
"json"
}
#[allow(clippy::too_many_arguments)]
async fn init(
&mut self,
_sink_uid: u64,
sink_url: &TremorUrl,
_codec: &dyn Codec,
_codec_map: &HashMap<String, Box<dyn Codec>>,
processors: Processors<'_>,
_is_linked: bool,
reply_channel: Sender<Reply>,
) -> Result<()> {
self.handle_channel().await?;
self.postprocessors = make_postprocessors(processors.post)?;
self.reply_channel = reply_channel;
self.sink_url = sink_url.clone();
Ok(())
}
async fn | (&mut self, _signal: Event) -> ResultVec {
//self.drain_fatal_errors()?;
Ok(Vec::new())
}
fn is_active(&self) -> bool {
true
}
fn auto_ack(&self) -> bool {
false
}
async fn terminate(&mut self) {
if let Some(channel) = self.channel.as_ref() {
if let Err(e) = channel.close(0, "terminating sink").await {
error!("[Sink] Failed to close channel: {}", e);
}
if let Err(e) = channel.wait_for_confirms().await {
error!("[Sink] Failed to close channel: {}", e);
};
}
/*if self.channel.in_flight_count() > 0 {
// wait a second in order to flush messages.
let wait_secs = 1;
info!(
"[Sink::{}] Flushing messages. Waiting for {} seconds.",
wait_secs, &self.sink_url
);
self.channel.flush(Duration::from_secs(1));
}*/
info!("[Sink::{}] Terminating | on_signal | identifier_name |
amqp.rs | 020-2021, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// #![cfg_attr(coverage, no_coverage)]
//! # AMQP Offramp
//!
//! The `amqp` offramp allows producing events to an amqp broker.
use crate::channel::{bounded, Receiver};
use crate::sink::prelude::*;
use halfbrown::HashMap;
use lapin::{
options::BasicPublishOptions, publisher_confirm::Confirmation, BasicProperties, Channel,
Connection, ConnectionProperties, PromiseChain,
};
use serde::Deserialize;
use std::{fmt, time::Instant};
use tremor_common::url::TremorUrl;
#[derive(Deserialize, Debug, Clone)]
pub(crate) struct Config {
pub(crate) amqp_addr: String,
#[serde(default = "Default::default")]
routing_key: String,
#[serde(default = "Default::default")]
exchange: String,
publish_options: BasicPublishOptions,
// headers to use for the messages
#[serde(default = "Default::default")]
pub(crate) headers: HashMap<String, Vec<String>>,
}
impl Config {
async fn channel(&self) -> PromiseChain<Channel> {
match Connection::connect(&self.amqp_addr, ConnectionProperties::default()).await {
Ok(connection) => connection.create_channel(),
Err(error) => PromiseChain::new_with_data(Err(error)), |
impl ConfigImpl for Config {}
/// Amqp offramp connector
pub(crate) struct Amqp {
sink_url: TremorUrl,
config: Config,
postprocessors: Postprocessors,
reply_channel: Sender<sink::Reply>,
channel: Option<Channel>,
error_rx: Receiver<()>,
error_tx: Sender<()>,
}
impl fmt::Debug for Amqp {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"[Sink::{}] RoutingKey: {}",
&self.sink_url, self.config.routing_key
)
}
}
pub(crate) struct Builder {}
impl offramp::Builder for Builder {
fn from_config(&self, config: &Option<OpConfig>) -> Result<Box<dyn Offramp>> {
if let Some(config) = config {
let config: Config = Config::new(config)?;
let (dummy_tx, _) = bounded(1);
let (error_tx, error_rx) = bounded(qsize());
Ok(SinkManager::new_box(Amqp {
sink_url: TremorUrl::from_offramp_id("amqp")?, // dummy
config,
postprocessors: vec![],
reply_channel: dummy_tx,
channel: None,
error_rx,
error_tx,
}))
} else {
Err("Amqp offramp requires a config".into())
}
}
}
impl Amqp {
async fn handle_channel(&mut self) -> Result<Option<&Channel>> {
while let Ok(()) = self.error_rx.try_recv() {
self.channel = None;
}
if self.channel.is_none() {
match self.config.channel().await.await {
Ok(channel) => self.channel = Some(channel),
Err(error) => return Err(error.into()),
}
}
return Ok(self.channel.as_ref());
}
}
#[async_trait::async_trait]
impl Sink for Amqp {
async fn on_event(
&mut self,
_input: &str,
codec: &mut dyn Codec,
_codec_map: &HashMap<String, Box<dyn Codec>>,
event: Event,
) -> ResultVec {
self.handle_channel().await?;
let ingest_ns = event.ingest_ns;
let processing_start = Instant::now();
/*
// evaluate here to avoid borrowing again while borrowed.
let config_reply = self.config.reply.as_deref();
let op_meta = &event.op_meta;
self.merged_meta.merge(op_meta.clone());
*/
let insight_event = event.insight_ack();
if let Some(channel) = &mut self.channel {
for (value, _) in event.value_meta_iter() {
let encoded = codec.encode(value)?;
let processed =
postprocess(self.postprocessors.as_mut_slice(), ingest_ns, encoded)?;
//let headers = meta.get("nats").and_then(|v| v.get_object("headers"));
for payload in processed {
/*
// prepare message reply
let message_reply = reply.or(config_reply);
*/
// prepare message headers
let properties = BasicProperties::default();
/*
let mut key_val: Vec<(&str, &str)> = Vec::with_capacity(
self.config.headers.len() + headers.map(HashMap::len).unwrap_or_default(),
);
for (key, val) in &self.config.headers {
for ele in val.iter() {
key_val.push((key.as_str(), ele.as_str()));
}
}
if let Some(headers) = headers {
for (key, val) in
headers.iter().filter_map(|(k, v)| Some((k, v.as_array()?)))
{
for ele in val.iter().filter_map(value_trait::ValueAccess::as_str) {
key_val.push((key, ele));
}
}
}
let message_headers = if key_val.is_empty() {
None
} else {
Some(Headers::from_iter(key_val))
};
*/
let publish_result = channel
.basic_publish(
self.config.exchange.as_str(),
self.config.routing_key.as_str(),
self.config.publish_options,
payload,
properties,
)
.await?
.await?;
match publish_result {
Confirmation::NotRequested | Confirmation::Ack(_) => {
if event.transactional {
let mut insight = insight_event.clone();
insight.cb = CbAction::Ack;
// we hopefully enver wait more then u64 ... if we do we got
// bigger problems
#[allow(clippy::cast_possible_truncation)]
let time = processing_start.elapsed().as_millis() as u64;
let mut m = Object::with_capacity(1);
m.insert("time".into(), time.into());
insight.data = (Value::null(), m).into();
self.reply_channel
.send(sink::Reply::Insight(insight.clone()))
.await?;
}
}
Confirmation::Nack(err) => {
if let Some(e) = err {
error!(
"[Sink::{}] failed to send message: {} {}",
&self.sink_url, e.reply_code, e.reply_text
);
} else {
error!(
"[Sink::{}] failed to send message: unknown error",
&self.sink_url
);
}
if self.error_tx.send(()).await.is_err() {
error!(
"[Sink::{}] Error notifying the system about amqp error",
&self.sink_url
);
}
if event.transactional {
let mut insight = insight_event.clone();
insight.cb = CbAction::Fail;
self.reply_channel
.send(sink::Reply::Response(ERR, insight))
.await?;
}
}
}
}
}
}
Ok(Vec::new())
}
fn default_codec(&self) -> &str {
"json"
}
#[allow(clippy::too_many_arguments)]
async fn init(
&mut self,
_sink_uid: u64,
sink_url: &TremorUrl,
_codec: &dyn Codec,
_codec_map: &HashMap<String, Box<dyn Codec>>,
processors: Processors<'_>,
_is_linked: bool,
reply_channel: Sender<Reply>,
) -> Result<()> {
self.handle_channel().await?;
self.postprocessors = make_postprocessors(processors.post)?;
self.reply_channel = reply_channel;
self.sink_url = sink_url.clone();
Ok(())
}
async fn on_signal(&mut self, _signal: Event) -> ResultVec {
//self.drain_fatal_errors()?;
Ok(Vec::new())
}
fn is_active(&self) -> bool {
true
}
fn auto_ack(&self) -> bool {
false
}
async fn terminate(&mut self) {
if let Some(channel) = self.channel.as_ref() {
if let Err(e) = channel.close(0, "terminating sink").await {
error!("[Sink] Failed to close channel: {}", e);
}
if let Err(e) = channel.wait_for_confirms().await {
error!("[Sink] Failed to close channel: {}", e);
};
}
/*if self.channel.in_flight_count() > 0 {
// wait a second in order to flush messages.
let wait_secs = 1;
info!(
"[Sink::{}] Flushing messages. Waiting for {} seconds.",
wait_secs, &self.sink_url
);
self.channel.flush(Duration::from_secs(1));
}*/
info!("[Sink::{}] Terminating | }
}
} | random_line_split |
amqp.rs | 020-2021, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// #![cfg_attr(coverage, no_coverage)]
//! # AMQP Offramp
//!
//! The `amqp` offramp allows producing events to an amqp broker.
use crate::channel::{bounded, Receiver};
use crate::sink::prelude::*;
use halfbrown::HashMap;
use lapin::{
options::BasicPublishOptions, publisher_confirm::Confirmation, BasicProperties, Channel,
Connection, ConnectionProperties, PromiseChain,
};
use serde::Deserialize;
use std::{fmt, time::Instant};
use tremor_common::url::TremorUrl;
#[derive(Deserialize, Debug, Clone)]
pub(crate) struct Config {
pub(crate) amqp_addr: String,
#[serde(default = "Default::default")]
routing_key: String,
#[serde(default = "Default::default")]
exchange: String,
publish_options: BasicPublishOptions,
// headers to use for the messages
#[serde(default = "Default::default")]
pub(crate) headers: HashMap<String, Vec<String>>,
}
impl Config {
async fn channel(&self) -> PromiseChain<Channel> {
match Connection::connect(&self.amqp_addr, ConnectionProperties::default()).await {
Ok(connection) => connection.create_channel(),
Err(error) => PromiseChain::new_with_data(Err(error)),
}
}
}
impl ConfigImpl for Config {}
/// Amqp offramp connector
pub(crate) struct Amqp {
sink_url: TremorUrl,
config: Config,
postprocessors: Postprocessors,
reply_channel: Sender<sink::Reply>,
channel: Option<Channel>,
error_rx: Receiver<()>,
error_tx: Sender<()>,
}
impl fmt::Debug for Amqp {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"[Sink::{}] RoutingKey: {}",
&self.sink_url, self.config.routing_key
)
}
}
pub(crate) struct Builder {}
impl offramp::Builder for Builder {
fn from_config(&self, config: &Option<OpConfig>) -> Result<Box<dyn Offramp>> {
if let Some(config) = config {
let config: Config = Config::new(config)?;
let (dummy_tx, _) = bounded(1);
let (error_tx, error_rx) = bounded(qsize());
Ok(SinkManager::new_box(Amqp {
sink_url: TremorUrl::from_offramp_id("amqp")?, // dummy
config,
postprocessors: vec![],
reply_channel: dummy_tx,
channel: None,
error_rx,
error_tx,
}))
} else {
Err("Amqp offramp requires a config".into())
}
}
}
impl Amqp {
async fn handle_channel(&mut self) -> Result<Option<&Channel>> |
}
#[async_trait::async_trait]
impl Sink for Amqp {
async fn on_event(
&mut self,
_input: &str,
codec: &mut dyn Codec,
_codec_map: &HashMap<String, Box<dyn Codec>>,
event: Event,
) -> ResultVec {
self.handle_channel().await?;
let ingest_ns = event.ingest_ns;
let processing_start = Instant::now();
/*
// evaluate here to avoid borrowing again while borrowed.
let config_reply = self.config.reply.as_deref();
let op_meta = &event.op_meta;
self.merged_meta.merge(op_meta.clone());
*/
let insight_event = event.insight_ack();
if let Some(channel) = &mut self.channel {
for (value, _) in event.value_meta_iter() {
let encoded = codec.encode(value)?;
let processed =
postprocess(self.postprocessors.as_mut_slice(), ingest_ns, encoded)?;
//let headers = meta.get("nats").and_then(|v| v.get_object("headers"));
for payload in processed {
/*
// prepare message reply
let message_reply = reply.or(config_reply);
*/
// prepare message headers
let properties = BasicProperties::default();
/*
let mut key_val: Vec<(&str, &str)> = Vec::with_capacity(
self.config.headers.len() + headers.map(HashMap::len).unwrap_or_default(),
);
for (key, val) in &self.config.headers {
for ele in val.iter() {
key_val.push((key.as_str(), ele.as_str()));
}
}
if let Some(headers) = headers {
for (key, val) in
headers.iter().filter_map(|(k, v)| Some((k, v.as_array()?)))
{
for ele in val.iter().filter_map(value_trait::ValueAccess::as_str) {
key_val.push((key, ele));
}
}
}
let message_headers = if key_val.is_empty() {
None
} else {
Some(Headers::from_iter(key_val))
};
*/
let publish_result = channel
.basic_publish(
self.config.exchange.as_str(),
self.config.routing_key.as_str(),
self.config.publish_options,
payload,
properties,
)
.await?
.await?;
match publish_result {
Confirmation::NotRequested | Confirmation::Ack(_) => {
if event.transactional {
let mut insight = insight_event.clone();
insight.cb = CbAction::Ack;
// we hopefully enver wait more then u64 ... if we do we got
// bigger problems
#[allow(clippy::cast_possible_truncation)]
let time = processing_start.elapsed().as_millis() as u64;
let mut m = Object::with_capacity(1);
m.insert("time".into(), time.into());
insight.data = (Value::null(), m).into();
self.reply_channel
.send(sink::Reply::Insight(insight.clone()))
.await?;
}
}
Confirmation::Nack(err) => {
if let Some(e) = err {
error!(
"[Sink::{}] failed to send message: {} {}",
&self.sink_url, e.reply_code, e.reply_text
);
} else {
error!(
"[Sink::{}] failed to send message: unknown error",
&self.sink_url
);
}
if self.error_tx.send(()).await.is_err() {
error!(
"[Sink::{}] Error notifying the system about amqp error",
&self.sink_url
);
}
if event.transactional {
let mut insight = insight_event.clone();
insight.cb = CbAction::Fail;
self.reply_channel
.send(sink::Reply::Response(ERR, insight))
.await?;
}
}
}
}
}
}
Ok(Vec::new())
}
fn default_codec(&self) -> &str {
"json"
}
#[allow(clippy::too_many_arguments)]
async fn init(
&mut self,
_sink_uid: u64,
sink_url: &TremorUrl,
_codec: &dyn Codec,
_codec_map: &HashMap<String, Box<dyn Codec>>,
processors: Processors<'_>,
_is_linked: bool,
reply_channel: Sender<Reply>,
) -> Result<()> {
self.handle_channel().await?;
self.postprocessors = make_postprocessors(processors.post)?;
self.reply_channel = reply_channel;
self.sink_url = sink_url.clone();
Ok(())
}
async fn on_signal(&mut self, _signal: Event) -> ResultVec {
//self.drain_fatal_errors()?;
Ok(Vec::new())
}
fn is_active(&self) -> bool {
true
}
fn auto_ack(&self) -> bool {
false
}
async fn terminate(&mut self) {
if let Some(channel) = self.channel.as_ref() {
if let Err(e) = channel.close(0, "terminating sink").await {
error!("[Sink] Failed to close channel: {}", e);
}
if let Err(e) = channel.wait_for_confirms().await {
error!("[Sink] Failed to close channel: {}", e);
};
}
/*if self.channel.in_flight_count() > 0 {
// wait a second in order to flush messages.
let wait_secs = 1;
info!(
"[Sink::{}] Flushing messages. Waiting for {} seconds.",
wait_secs, &self.sink_url
);
self.channel.flush(Duration::from_secs(1));
}*/
info!("[Sink::{}] Termin | {
while let Ok(()) = self.error_rx.try_recv() {
self.channel = None;
}
if self.channel.is_none() {
match self.config.channel().await.await {
Ok(channel) => self.channel = Some(channel),
Err(error) => return Err(error.into()),
}
}
return Ok(self.channel.as_ref());
} | identifier_body |
amqp.rs | 020-2021, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// #![cfg_attr(coverage, no_coverage)]
//! # AMQP Offramp
//!
//! The `amqp` offramp allows producing events to an amqp broker.
use crate::channel::{bounded, Receiver};
use crate::sink::prelude::*;
use halfbrown::HashMap;
use lapin::{
options::BasicPublishOptions, publisher_confirm::Confirmation, BasicProperties, Channel,
Connection, ConnectionProperties, PromiseChain,
};
use serde::Deserialize;
use std::{fmt, time::Instant};
use tremor_common::url::TremorUrl;
#[derive(Deserialize, Debug, Clone)]
pub(crate) struct Config {
pub(crate) amqp_addr: String,
#[serde(default = "Default::default")]
routing_key: String,
#[serde(default = "Default::default")]
exchange: String,
publish_options: BasicPublishOptions,
// headers to use for the messages
#[serde(default = "Default::default")]
pub(crate) headers: HashMap<String, Vec<String>>,
}
impl Config {
async fn channel(&self) -> PromiseChain<Channel> {
match Connection::connect(&self.amqp_addr, ConnectionProperties::default()).await {
Ok(connection) => connection.create_channel(),
Err(error) => PromiseChain::new_with_data(Err(error)),
}
}
}
impl ConfigImpl for Config {}
/// Amqp offramp connector
pub(crate) struct Amqp {
sink_url: TremorUrl,
config: Config,
postprocessors: Postprocessors,
reply_channel: Sender<sink::Reply>,
channel: Option<Channel>,
error_rx: Receiver<()>,
error_tx: Sender<()>,
}
impl fmt::Debug for Amqp {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"[Sink::{}] RoutingKey: {}",
&self.sink_url, self.config.routing_key
)
}
}
pub(crate) struct Builder {}
impl offramp::Builder for Builder {
fn from_config(&self, config: &Option<OpConfig>) -> Result<Box<dyn Offramp>> {
if let Some(config) = config {
let config: Config = Config::new(config)?;
let (dummy_tx, _) = bounded(1);
let (error_tx, error_rx) = bounded(qsize());
Ok(SinkManager::new_box(Amqp {
sink_url: TremorUrl::from_offramp_id("amqp")?, // dummy
config,
postprocessors: vec![],
reply_channel: dummy_tx,
channel: None,
error_rx,
error_tx,
}))
} else {
Err("Amqp offramp requires a config".into())
}
}
}
impl Amqp {
async fn handle_channel(&mut self) -> Result<Option<&Channel>> {
while let Ok(()) = self.error_rx.try_recv() {
self.channel = None;
}
if self.channel.is_none() {
match self.config.channel().await.await {
Ok(channel) => self.channel = Some(channel),
Err(error) => return Err(error.into()),
}
}
return Ok(self.channel.as_ref());
}
}
#[async_trait::async_trait]
impl Sink for Amqp {
async fn on_event(
&mut self,
_input: &str,
codec: &mut dyn Codec,
_codec_map: &HashMap<String, Box<dyn Codec>>,
event: Event,
) -> ResultVec {
self.handle_channel().await?;
let ingest_ns = event.ingest_ns;
let processing_start = Instant::now();
/*
// evaluate here to avoid borrowing again while borrowed.
let config_reply = self.config.reply.as_deref();
let op_meta = &event.op_meta;
self.merged_meta.merge(op_meta.clone());
*/
let insight_event = event.insight_ack();
if let Some(channel) = &mut self.channel {
for (value, _) in event.value_meta_iter() {
let encoded = codec.encode(value)?;
let processed =
postprocess(self.postprocessors.as_mut_slice(), ingest_ns, encoded)?;
//let headers = meta.get("nats").and_then(|v| v.get_object("headers"));
for payload in processed {
/*
// prepare message reply
let message_reply = reply.or(config_reply);
*/
// prepare message headers
let properties = BasicProperties::default();
/*
let mut key_val: Vec<(&str, &str)> = Vec::with_capacity(
self.config.headers.len() + headers.map(HashMap::len).unwrap_or_default(),
);
for (key, val) in &self.config.headers {
for ele in val.iter() {
key_val.push((key.as_str(), ele.as_str()));
}
}
if let Some(headers) = headers {
for (key, val) in
headers.iter().filter_map(|(k, v)| Some((k, v.as_array()?)))
{
for ele in val.iter().filter_map(value_trait::ValueAccess::as_str) {
key_val.push((key, ele));
}
}
}
let message_headers = if key_val.is_empty() {
None
} else {
Some(Headers::from_iter(key_val))
};
*/
let publish_result = channel
.basic_publish(
self.config.exchange.as_str(),
self.config.routing_key.as_str(),
self.config.publish_options,
payload,
properties,
)
.await?
.await?;
match publish_result {
Confirmation::NotRequested | Confirmation::Ack(_) => |
Confirmation::Nack(err) => {
if let Some(e) = err {
error!(
"[Sink::{}] failed to send message: {} {}",
&self.sink_url, e.reply_code, e.reply_text
);
} else {
error!(
"[Sink::{}] failed to send message: unknown error",
&self.sink_url
);
}
if self.error_tx.send(()).await.is_err() {
error!(
"[Sink::{}] Error notifying the system about amqp error",
&self.sink_url
);
}
if event.transactional {
let mut insight = insight_event.clone();
insight.cb = CbAction::Fail;
self.reply_channel
.send(sink::Reply::Response(ERR, insight))
.await?;
}
}
}
}
}
}
Ok(Vec::new())
}
fn default_codec(&self) -> &str {
"json"
}
#[allow(clippy::too_many_arguments)]
async fn init(
&mut self,
_sink_uid: u64,
sink_url: &TremorUrl,
_codec: &dyn Codec,
_codec_map: &HashMap<String, Box<dyn Codec>>,
processors: Processors<'_>,
_is_linked: bool,
reply_channel: Sender<Reply>,
) -> Result<()> {
self.handle_channel().await?;
self.postprocessors = make_postprocessors(processors.post)?;
self.reply_channel = reply_channel;
self.sink_url = sink_url.clone();
Ok(())
}
async fn on_signal(&mut self, _signal: Event) -> ResultVec {
//self.drain_fatal_errors()?;
Ok(Vec::new())
}
fn is_active(&self) -> bool {
true
}
fn auto_ack(&self) -> bool {
false
}
async fn terminate(&mut self) {
if let Some(channel) = self.channel.as_ref() {
if let Err(e) = channel.close(0, "terminating sink").await {
error!("[Sink] Failed to close channel: {}", e);
}
if let Err(e) = channel.wait_for_confirms().await {
error!("[Sink] Failed to close channel: {}", e);
};
}
/*if self.channel.in_flight_count() > 0 {
// wait a second in order to flush messages.
let wait_secs = 1;
info!(
"[Sink::{}] Flushing messages. Waiting for {} seconds.",
wait_secs, &self.sink_url
);
self.channel.flush(Duration::from_secs(1));
}*/
info!("[Sink::{}] Termin | {
if event.transactional {
let mut insight = insight_event.clone();
insight.cb = CbAction::Ack;
// we hopefully enver wait more then u64 ... if we do we got
// bigger problems
#[allow(clippy::cast_possible_truncation)]
let time = processing_start.elapsed().as_millis() as u64;
let mut m = Object::with_capacity(1);
m.insert("time".into(), time.into());
insight.data = (Value::null(), m).into();
self.reply_channel
.send(sink::Reply::Insight(insight.clone()))
.await?;
}
} | conditional_block |
mod.rs | ) -> InputHandle {
InputHandle {
name: name.to_os_string(),
inner: Box::new(inner),
read_only: true,
digest: Default::default(),
origin,
ever_read: false,
did_unhandled_seek: false,
}
}
pub fn name(&self) -> &OsStr {
self.name.as_os_str()
}
pub fn origin(&self) -> InputOrigin {
self.origin
}
/// Consumes the object and returns the underlying readable handle that
/// it references.
pub fn into_inner(self) -> Box<dyn InputFeatures> {
self.inner
}
/// Consumes the object and returns the SHA256 sum of the content that was
/// read. No digest is returned if there was ever a seek on the input
/// stream, since in that case the results will not be reliable. We also
/// return None if the stream was never read, which is another common
/// TeX access pattern: files are opened, immediately closed, and then
/// opened again. Finally, no digest is returned if the file is marked read-only.
pub fn into_name_digest(self) -> (OsString, Option<DigestData>) {
if self.did_unhandled_seek || !self.ever_read || self.read_only {
(self.name, None)
} else {
(self.name, Some(DigestData::from(self.digest)))
}
}
pub fn getc(&mut self) -> Result<u8> {
let mut byte = [0u8; 1];
if self.read(&mut byte[..1])? == 0 {
// EOF
return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "EOF in getc").into());
}
Ok(byte[0])
}
}
impl Read for InputHandle {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.ever_read = true;
let n = self.inner.read(buf)?;
if !self.read_only {
self.digest.input(&buf[..n]);
}
Ok(n)
}
}
impl InputFeatures for InputHandle {
fn get_size(&mut self) -> Result<usize> {
self.inner.get_size()
}
fn try_seek(&mut self, pos: SeekFrom) -> Result<u64> {
match pos {
SeekFrom::Start(0) => {
// As described above, there is a common pattern in TeX file
// accesses: read a few bytes to sniff, then go back to the
// beginning. We should tidy up the I/O to just buffer instead
// of seeking, but in the meantime, we can handle this.
self.digest = Default::default();
self.ever_read = false;
}
SeekFrom::Current(0) => {
// Noop. This must *not* clear the ungetc buffer for our
// current PDF startxref/xref parsing code to work.
}
_ => {
self.did_unhandled_seek = true;
}
}
let offset = self.inner.try_seek(pos)?;
Ok(offset)
}
}
pub struct OutputHandle {
name: OsString,
inner: Box<dyn Write>,
digest: digest::DigestComputer,
}
impl OutputHandle {
pub fn new<T: 'static + Write>(name: &OsStr, inner: T) -> OutputHandle {
OutputHandle {
name: name.to_os_string(),
inner: Box::new(inner),
digest: digest::create(),
}
}
pub fn name(&self) -> &OsStr {
self.name.as_os_str()
}
/// Consumes the object and returns the underlying writable handle that
/// it references.
pub fn into_inner(self) -> Box<dyn Write> {
self.inner
}
/// Consumes the object and returns the SHA256 sum of the content that was
/// written.
pub fn into_name_digest(self) -> (OsString, DigestData) {
(self.name, DigestData::from(self.digest))
}
}
impl Write for OutputHandle {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let n = self.inner.write(buf)?;
self.digest.input(&buf[..n]);
Ok(n)
}
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()
}
}
// An Io provider is a source of handles. One wrinkle is that it's good to be
// able to distinguish between unavailability of a given name and error
// accessing it. We take file paths as OsStrs, although since we parse input
// files as Unicode it may not be possible to actually express zany
// non-Unicode Unix paths inside the engine.
#[derive(Debug)]
pub enum OpenResult<T> {
Ok(T),
NotAvailable,
Err(Error),
}
impl<T> OpenResult<T> {
pub fn unwrap(self) -> T {
match self {
OpenResult::Ok(t) => t,
_ => panic!("expected an open file"),
}
}
/// Returns true if this result is of the NotAvailable variant.
pub fn is_not_available(&self) -> bool {
if let OpenResult::NotAvailable = *self {
true
} else {
false
}
}
/// Convert this object into a plain Result, erroring if the item was not available.
pub fn must_exist(self) -> Result<T> {
match self {
OpenResult::Ok(t) => Ok(t),
OpenResult::Err(e) => Err(e),
OpenResult::NotAvailable => {
Err(io::Error::new(io::ErrorKind::NotFound, "not found").into())
}
}
}
}
/// A hack to allow casting of Bundles to IoProviders.
///
/// The code that sets up the I/O stack is handed a reference to a Bundle
/// trait object. For the actual I/O, it needs to convert this to an
/// IoProvider trait object. [According to
/// StackExchange](https://stackoverflow.com/a/28664881/3760486), the
/// following pattern is the least-bad way to achieve the necessary upcasting.
pub trait AsIoProviderMut {
/// Represent this value as an IoProvider trait object.
fn as_ioprovider_mut(&mut self) -> &mut dyn IoProvider;
}
impl<T: IoProvider> AsIoProviderMut for T {
fn as_ioprovider_mut(&mut self) -> &mut dyn IoProvider {
self
}
}
/// A trait for types that can read or write files needed by the TeX engine.
pub trait IoProvider: AsIoProviderMut {
fn output_open_name(&mut self, _name: &OsStr) -> OpenResult<OutputHandle> {
OpenResult::NotAvailable
}
fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> {
OpenResult::NotAvailable
}
fn input_open_name(
&mut self,
_name: &OsStr,
_status: &mut dyn StatusBackend,
) -> OpenResult<InputHandle> {
| /// Open the "primary" input file, which in the context of TeX is the main
/// input that it's given. When the build is being done using the
/// filesystem and the input is a file on the filesystem, this function
/// isn't necesssarily that important, but those conditions don't always
/// hold.
fn input_open_primary(&mut self, _status: &mut dyn StatusBackend) -> OpenResult<InputHandle> {
OpenResult::NotAvailable
}
/// Open a format file with the specified name. Format files have a
/// specialized entry point because IOProviders may wish to handle them
/// specially: namely, to munge the filename to one that includes the
/// current version of the Tectonic engine, since the format contents
/// depend sensitively on the engine internals.
fn input_open_format(
&mut self,
name: &OsStr,
status: &mut dyn StatusBackend,
) -> OpenResult<InputHandle> {
self.input_open_name(name, status)
}
/// Save an a format dump in some way that this provider may be able to
/// recover in the future. This awkward interface is needed for to write
/// formats with their special munged file names.
fn write_format(
&mut self,
_name: &str,
_data: &[u8],
_status: &mut dyn StatusBackend,
) -> Result<()> {
Err(ErrorKind::Msg("this I/O layer cannot save format files".to_owned()).into())
}
}
impl<P: IoProvider + ?Sized> IoProvider for Box<P> {
fn output_open_name(&mut self, name: &OsStr) -> OpenResult<OutputHandle> {
(**self).output_open_name(name)
}
fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> {
(**self).output_open_stdout()
}
fn input_open_name(
&mut self,
name: &OsStr,
status: &mut dyn StatusBackend,
) -> OpenResult<InputHandle> {
(**self).input_open_name(name, status)
}
fn input_open_primary(&mut self, status: &mut dyn StatusBackend) | OpenResult::NotAvailable
}
| identifier_body |
mod.rs | Result, erroring if the item was not available.
pub fn must_exist(self) -> Result<T> {
match self {
OpenResult::Ok(t) => Ok(t),
OpenResult::Err(e) => Err(e),
OpenResult::NotAvailable => {
Err(io::Error::new(io::ErrorKind::NotFound, "not found").into())
}
}
}
}
/// A hack to allow casting of Bundles to IoProviders.
///
/// The code that sets up the I/O stack is handed a reference to a Bundle
/// trait object. For the actual I/O, it needs to convert this to an
/// IoProvider trait object. [According to
/// StackExchange](https://stackoverflow.com/a/28664881/3760486), the
/// following pattern is the least-bad way to achieve the necessary upcasting.
pub trait AsIoProviderMut {
/// Represent this value as an IoProvider trait object.
fn as_ioprovider_mut(&mut self) -> &mut dyn IoProvider;
}
impl<T: IoProvider> AsIoProviderMut for T {
fn as_ioprovider_mut(&mut self) -> &mut dyn IoProvider {
self
}
}
/// A trait for types that can read or write files needed by the TeX engine.
pub trait IoProvider: AsIoProviderMut {
fn output_open_name(&mut self, _name: &OsStr) -> OpenResult<OutputHandle> {
OpenResult::NotAvailable
}
fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> {
OpenResult::NotAvailable
}
fn input_open_name(
&mut self,
_name: &OsStr,
_status: &mut dyn StatusBackend,
) -> OpenResult<InputHandle> {
OpenResult::NotAvailable
}
/// Open the "primary" input file, which in the context of TeX is the main
/// input that it's given. When the build is being done using the
/// filesystem and the input is a file on the filesystem, this function
/// isn't necesssarily that important, but those conditions don't always
/// hold.
fn input_open_primary(&mut self, _status: &mut dyn StatusBackend) -> OpenResult<InputHandle> {
OpenResult::NotAvailable
}
/// Open a format file with the specified name. Format files have a
/// specialized entry point because IOProviders may wish to handle them
/// specially: namely, to munge the filename to one that includes the
/// current version of the Tectonic engine, since the format contents
/// depend sensitively on the engine internals.
fn input_open_format(
&mut self,
name: &OsStr,
status: &mut dyn StatusBackend,
) -> OpenResult<InputHandle> {
self.input_open_name(name, status)
}
/// Save an a format dump in some way that this provider may be able to
/// recover in the future. This awkward interface is needed for to write
/// formats with their special munged file names.
fn write_format(
&mut self,
_name: &str,
_data: &[u8],
_status: &mut dyn StatusBackend,
) -> Result<()> {
Err(ErrorKind::Msg("this I/O layer cannot save format files".to_owned()).into())
}
}
impl<P: IoProvider + ?Sized> IoProvider for Box<P> {
fn output_open_name(&mut self, name: &OsStr) -> OpenResult<OutputHandle> {
(**self).output_open_name(name)
}
fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> {
(**self).output_open_stdout()
}
fn input_open_name(
&mut self,
name: &OsStr,
status: &mut dyn StatusBackend,
) -> OpenResult<InputHandle> {
(**self).input_open_name(name, status)
}
fn input_open_primary(&mut self, status: &mut dyn StatusBackend) -> OpenResult<InputHandle> {
(**self).input_open_primary(status)
}
fn input_open_format(
&mut self,
name: &OsStr,
status: &mut dyn StatusBackend,
) -> OpenResult<InputHandle> {
(**self).input_open_format(name, status)
}
fn write_format(
&mut self,
name: &str,
data: &[u8],
status: &mut dyn StatusBackend,
) -> Result<()> {
(**self).write_format(name, data, status)
}
}
/// A special IoProvider that can make TeX format files.
///
/// A “bundle” is expected to contain a large number of TeX support files —
/// for instance, a compilation of a TeXLive distribution. In terms of the
/// software architecture, though, what is special about a bundle is that one
/// can generate one or more TeX format files from its contents without
/// reference to any other I/O resources.
pub trait Bundle: IoProvider {
/// Get a cryptographic digest summarizing this bundle’s contents.
///
/// The digest summarizes the exact contents of every file in the bundle.
/// It is computed from the sorted names and SHA256 digests of the
/// component files [as implemented in the script
/// builder/make-zipfile.py](https://github.com/tectonic-typesetting/tectonic-staging/blob/master/builder/make-zipfile.py#L138)
/// in the `tectonic-staging` module.
///
/// The default implementation gets the digest from a file name
/// `SHA256SUM`, which is expected to contain the digest in hex-encoded
/// format.
fn get_digest(&mut self, status: &mut dyn StatusBackend) -> Result<DigestData> {
let digest_text = match self.input_open_name(OsStr::new(digest::DIGEST_NAME), status) {
OpenResult::Ok(h) => {
let mut text = String::new();
h.take(64).read_to_string(&mut text)?;
text
}
OpenResult::NotAvailable => {
// Broken or un-cacheable backend.
return Err(ErrorKind::Msg(
"itar-format bundle does not provide needed SHA256SUM file".to_owned(),
)
.into());
}
OpenResult::Err(e) => {
return Err(e);
}
};
Ok(ctry!(DigestData::from_str(&digest_text); "corrupted SHA256 digest data"))
}
}
impl<B: Bundle + ?Sized> Bundle for Box<B> {
fn get_digest(&mut self, status: &mut dyn StatusBackend) -> Result<DigestData> {
(**self).get_digest(status)
}
}
// Some generically helpful InputFeatures impls
impl<R: Read> InputFeatures for GzDecoder<R> {
fn get_size(&mut self) -> Result<usize> {
Err(ErrorKind::NotSizeable.into())
}
fn try_seek(&mut self, _: SeekFrom) -> Result<u64> {
Err(ErrorKind::NotSeekable.into())
}
}
impl InputFeatures for Cursor<Vec<u8>> {
fn get_size(&mut self) -> Result<usize> {
Ok(self.get_ref().len())
}
fn try_seek(&mut self, pos: SeekFrom) -> Result<u64> {
Ok(self.seek(pos)?)
}
}
// Reexports
pub use self::filesystem::{FilesystemIo, FilesystemPrimaryInputIo};
pub use self::memory::MemoryIo;
pub use self::setup::{IoSetup, IoSetupBuilder};
pub use self::stack::IoStack;
pub use self::stdstreams::GenuineStdoutIo;
// Helpful.
pub fn try_open_file<P: AsRef<Path>>(path: P) -> OpenResult<File> {
use std::io::ErrorKind::NotFound;
match File::open(path) {
Ok(f) => OpenResult::Ok(f),
Err(e) => {
if e.kind() == NotFound {
OpenResult::NotAvailable
} else {
OpenResult::Err(e.into())
}
}
}
}
/// Normalize a TeX path in a system independent™ way by stripping any `.`, `..`,
/// or extra separators '/' so that it is of the form
///
/// ```text
/// path/to/my/file.txt
/// ../../path/to/parent/dir/file.txt
/// /absolute/path/to/file.txt
/// ```
///
/// Does not strip whitespace.
///
/// Returns `None` if the path refers to a parent of the root.
fn try_normalize_tex_path(path: &str) -> Option<String> {
use std::iter::repeat;
if path.is_empty() {
return Some("".into());
}
let mut r = Vec::new();
let mut parent_level = 0;
let mut has_root = false;
// TODO: We need to handle a prefix on Windows (i.e. "C:").
for (i, c) in path.split('/').enumerate() {
match c {
"" if i == 0 => {
has_root = true;
r.push("");
}
"" | "." => {}
".." => {
match r.pop() {
// about to pop the root
Some("") => return None,
None => parent_level += 1, | _ => {}
}
} | random_line_split | |
mod.rs | : &[u8]) -> io::Result<usize> {
let n = self.inner.write(buf)?;
self.digest.input(&buf[..n]);
Ok(n)
}
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()
}
}
// An Io provider is a source of handles. One wrinkle is that it's good to be
// able to distinguish between unavailability of a given name and error
// accessing it. We take file paths as OsStrs, although since we parse input
// files as Unicode it may not be possible to actually express zany
// non-Unicode Unix paths inside the engine.
#[derive(Debug)]
pub enum OpenResult<T> {
Ok(T),
NotAvailable,
Err(Error),
}
impl<T> OpenResult<T> {
pub fn unwrap(self) -> T {
match self {
OpenResult::Ok(t) => t,
_ => panic!("expected an open file"),
}
}
/// Returns true if this result is of the NotAvailable variant.
pub fn is_not_available(&self) -> bool {
if let OpenResult::NotAvailable = *self {
true
} else {
false
}
}
/// Convert this object into a plain Result, erroring if the item was not available.
pub fn must_exist(self) -> Result<T> {
match self {
OpenResult::Ok(t) => Ok(t),
OpenResult::Err(e) => Err(e),
OpenResult::NotAvailable => {
Err(io::Error::new(io::ErrorKind::NotFound, "not found").into())
}
}
}
}
/// A hack to allow casting of Bundles to IoProviders.
///
/// The code that sets up the I/O stack is handed a reference to a Bundle
/// trait object. For the actual I/O, it needs to convert this to an
/// IoProvider trait object. [According to
/// StackExchange](https://stackoverflow.com/a/28664881/3760486), the
/// following pattern is the least-bad way to achieve the necessary upcasting.
pub trait AsIoProviderMut {
/// Represent this value as an IoProvider trait object.
fn as_ioprovider_mut(&mut self) -> &mut dyn IoProvider;
}
impl<T: IoProvider> AsIoProviderMut for T {
fn as_ioprovider_mut(&mut self) -> &mut dyn IoProvider {
self
}
}
/// A trait for types that can read or write files needed by the TeX engine.
pub trait IoProvider: AsIoProviderMut {
fn output_open_name(&mut self, _name: &OsStr) -> OpenResult<OutputHandle> {
OpenResult::NotAvailable
}
fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> {
OpenResult::NotAvailable
}
fn input_open_name(
&mut self,
_name: &OsStr,
_status: &mut dyn StatusBackend,
) -> OpenResult<InputHandle> {
OpenResult::NotAvailable
}
/// Open the "primary" input file, which in the context of TeX is the main
/// input that it's given. When the build is being done using the
/// filesystem and the input is a file on the filesystem, this function
/// isn't necesssarily that important, but those conditions don't always
/// hold.
fn input_open_primary(&mut self, _status: &mut dyn StatusBackend) -> OpenResult<InputHandle> {
OpenResult::NotAvailable
}
/// Open a format file with the specified name. Format files have a
/// specialized entry point because IOProviders may wish to handle them
/// specially: namely, to munge the filename to one that includes the
/// current version of the Tectonic engine, since the format contents
/// depend sensitively on the engine internals.
fn input_open_format(
&mut self,
name: &OsStr,
status: &mut dyn StatusBackend,
) -> OpenResult<InputHandle> {
self.input_open_name(name, status)
}
/// Save an a format dump in some way that this provider may be able to
/// recover in the future. This awkward interface is needed for to write
/// formats with their special munged file names.
fn write_format(
&mut self,
_name: &str,
_data: &[u8],
_status: &mut dyn StatusBackend,
) -> Result<()> {
Err(ErrorKind::Msg("this I/O layer cannot save format files".to_owned()).into())
}
}
impl<P: IoProvider + ?Sized> IoProvider for Box<P> {
fn output_open_name(&mut self, name: &OsStr) -> OpenResult<OutputHandle> {
(**self).output_open_name(name)
}
fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> {
(**self).output_open_stdout()
}
fn input_open_name(
&mut self,
name: &OsStr,
status: &mut dyn StatusBackend,
) -> OpenResult<InputHandle> {
(**self).input_open_name(name, status)
}
fn input_open_primary(&mut self, status: &mut dyn StatusBackend) -> OpenResult<InputHandle> {
(**self).input_open_primary(status)
}
fn input_open_format(
&mut self,
name: &OsStr,
status: &mut dyn StatusBackend,
) -> OpenResult<InputHandle> {
(**self).input_open_format(name, status)
}
fn write_format(
&mut self,
name: &str,
data: &[u8],
status: &mut dyn StatusBackend,
) -> Result<()> {
(**self).write_format(name, data, status)
}
}
/// A special IoProvider that can make TeX format files.
///
/// A “bundle” is expected to contain a large number of TeX support files —
/// for instance, a compilation of a TeXLive distribution. In terms of the
/// software architecture, though, what is special about a bundle is that one
/// can generate one or more TeX format files from its contents without
/// reference to any other I/O resources.
pub trait Bundle: IoProvider {
/// Get a cryptographic digest summarizing this bundle’s contents.
///
/// The digest summarizes the exact contents of every file in the bundle.
/// It is computed from the sorted names and SHA256 digests of the
/// component files [as implemented in the script
/// builder/make-zipfile.py](https://github.com/tectonic-typesetting/tectonic-staging/blob/master/builder/make-zipfile.py#L138)
/// in the `tectonic-staging` module.
///
/// The default implementation gets the digest from a file name
/// `SHA256SUM`, which is expected to contain the digest in hex-encoded
/// format.
fn get_digest(&mut self, status: &mut dyn StatusBackend) -> Result<DigestData> {
let digest_text = match self.input_open_name(OsStr::new(digest::DIGEST_NAME), status) {
OpenResult::Ok(h) => {
let mut text = String::new();
h.take(64).read_to_string(&mut text)?;
text
}
OpenResult::NotAvailable => {
// Broken or un-cacheable backend.
return Err(ErrorKind::Msg(
"itar-format bundle does not provide needed SHA256SUM file".to_owned(),
)
.into());
}
OpenResult::Err(e) => {
return Err(e);
}
};
Ok(ctry!(DigestData::from_str(&digest_text); "corrupted SHA256 digest data"))
}
}
impl<B: Bundle + ?Sized> Bundle for Box<B> {
fn get_digest(&mut self, status: &mut dyn StatusBackend) -> Result<DigestData> {
(**self).get_digest(status)
}
}
// Some generically helpful InputFeatures impls
impl<R: Read> InputFeatures for GzDecoder<R> {
fn get_size(&mut self) -> Result<usize> {
Err(ErrorKind::NotSizeable.into())
}
fn try_seek(&mut self, _: SeekFrom) -> Result<u64> {
Err(ErrorKind::NotSeekable.into())
}
}
impl InputFeatures for Cursor<Vec<u8>> {
fn get_size(&mut self) -> Result<usize> {
Ok(self.get_ref().len())
}
fn try_seek(&mut self, pos: SeekFrom) -> Result<u64> {
Ok(self.seek(pos)?)
}
}
// Reexports
pub use self::filesystem::{FilesystemIo, FilesystemPrimaryInputIo};
pub use self::memory::MemoryIo;
pub use self::setup::{IoSetup, IoSetupBuilder};
pub use self::stack::IoStack;
pub use self::stdstreams::GenuineStdoutIo;
// Helpful.
pub fn try_open_file<P: AsRef<Path>>(path: P) -> OpenResult<File> {
use std::io::ErrorKind::NotFound;
match File::open(path) {
Ok(f) => OpenResult::Ok(f),
Err(e) => {
if e.kind() == NotFound {
OpenResult::NotAvailable
} else {
| OpenResult::Err(e.into())
}
} | conditional_block | |
mod.rs | ) -> InputHandle {
InputHandle {
name: name.to_os_string(),
inner: Box::new(inner),
read_only: true,
digest: Default::default(),
origin,
ever_read: false,
did_unhandled_seek: false,
}
}
pub fn name(&self) -> &OsStr {
self.name.as_os_str()
}
pub fn origin(&self) -> InputOrigin {
self.origin
}
/// Consumes the object and returns the underlying readable handle that
/// it references.
pub fn into_inner(self) -> Box<dyn InputFeatures> {
self.inner
}
/// Consumes the object and returns the SHA256 sum of the content that was
/// read. No digest is returned if there was ever a seek on the input
/// stream, since in that case the results will not be reliable. We also
/// return None if the stream was never read, which is another common
/// TeX access pattern: files are opened, immediately closed, and then
/// opened again. Finally, no digest is returned if the file is marked read-only.
pub fn into_name_digest(self) -> (OsString, Option<DigestData>) {
if self.did_unhandled_seek || !self.ever_read || self.read_only {
(self.name, None)
} else {
(self.name, Some(DigestData::from(self.digest)))
}
}
pub fn getc(&mut self) -> Result<u8> {
let mut byte = [0u8; 1];
if self.read(&mut byte[..1])? == 0 {
// EOF
return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "EOF in getc").into());
}
Ok(byte[0])
}
}
impl Read for InputHandle {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.ever_read = true;
let n = self.inner.read(buf)?;
if !self.read_only {
self.digest.input(&buf[..n]);
}
Ok(n)
}
}
impl InputFeatures for InputHandle {
fn get_size(&mut self) -> Result<usize> {
self.inner.get_size()
}
fn try_seek(&mut self, pos: SeekFrom) -> Result<u64> {
match pos {
SeekFrom::Start(0) => {
// As described above, there is a common pattern in TeX file
// accesses: read a few bytes to sniff, then go back to the
// beginning. We should tidy up the I/O to just buffer instead
// of seeking, but in the meantime, we can handle this.
self.digest = Default::default();
self.ever_read = false;
}
SeekFrom::Current(0) => {
// Noop. This must *not* clear the ungetc buffer for our
// current PDF startxref/xref parsing code to work.
}
_ => {
self.did_unhandled_seek = true;
}
}
let offset = self.inner.try_seek(pos)?;
Ok(offset)
}
}
pub struct OutputHandle {
name: OsString,
inner: Box<dyn Write>,
digest: digest::DigestComputer,
}
impl OutputHandle {
pub fn ne | : 'static + Write>(name: &OsStr, inner: T) -> OutputHandle {
OutputHandle {
name: name.to_os_string(),
inner: Box::new(inner),
digest: digest::create(),
}
}
pub fn name(&self) -> &OsStr {
self.name.as_os_str()
}
/// Consumes the object and returns the underlying writable handle that
/// it references.
pub fn into_inner(self) -> Box<dyn Write> {
self.inner
}
/// Consumes the object and returns the SHA256 sum of the content that was
/// written.
pub fn into_name_digest(self) -> (OsString, DigestData) {
(self.name, DigestData::from(self.digest))
}
}
impl Write for OutputHandle {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let n = self.inner.write(buf)?;
self.digest.input(&buf[..n]);
Ok(n)
}
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()
}
}
// An Io provider is a source of handles. One wrinkle is that it's good to be
// able to distinguish between unavailability of a given name and error
// accessing it. We take file paths as OsStrs, although since we parse input
// files as Unicode it may not be possible to actually express zany
// non-Unicode Unix paths inside the engine.
#[derive(Debug)]
pub enum OpenResult<T> {
Ok(T),
NotAvailable,
Err(Error),
}
impl<T> OpenResult<T> {
pub fn unwrap(self) -> T {
match self {
OpenResult::Ok(t) => t,
_ => panic!("expected an open file"),
}
}
/// Returns true if this result is of the NotAvailable variant.
pub fn is_not_available(&self) -> bool {
if let OpenResult::NotAvailable = *self {
true
} else {
false
}
}
/// Convert this object into a plain Result, erroring if the item was not available.
pub fn must_exist(self) -> Result<T> {
match self {
OpenResult::Ok(t) => Ok(t),
OpenResult::Err(e) => Err(e),
OpenResult::NotAvailable => {
Err(io::Error::new(io::ErrorKind::NotFound, "not found").into())
}
}
}
}
/// A hack to allow casting of Bundles to IoProviders.
///
/// The code that sets up the I/O stack is handed a reference to a Bundle
/// trait object. For the actual I/O, it needs to convert this to an
/// IoProvider trait object. [According to
/// StackExchange](https://stackoverflow.com/a/28664881/3760486), the
/// following pattern is the least-bad way to achieve the necessary upcasting.
pub trait AsIoProviderMut {
/// Represent this value as an IoProvider trait object.
fn as_ioprovider_mut(&mut self) -> &mut dyn IoProvider;
}
impl<T: IoProvider> AsIoProviderMut for T {
fn as_ioprovider_mut(&mut self) -> &mut dyn IoProvider {
self
}
}
/// A trait for types that can read or write files needed by the TeX engine.
pub trait IoProvider: AsIoProviderMut {
fn output_open_name(&mut self, _name: &OsStr) -> OpenResult<OutputHandle> {
OpenResult::NotAvailable
}
fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> {
OpenResult::NotAvailable
}
fn input_open_name(
&mut self,
_name: &OsStr,
_status: &mut dyn StatusBackend,
) -> OpenResult<InputHandle> {
OpenResult::NotAvailable
}
/// Open the "primary" input file, which in the context of TeX is the main
/// input that it's given. When the build is being done using the
/// filesystem and the input is a file on the filesystem, this function
/// isn't necesssarily that important, but those conditions don't always
/// hold.
fn input_open_primary(&mut self, _status: &mut dyn StatusBackend) -> OpenResult<InputHandle> {
OpenResult::NotAvailable
}
/// Open a format file with the specified name. Format files have a
/// specialized entry point because IOProviders may wish to handle them
/// specially: namely, to munge the filename to one that includes the
/// current version of the Tectonic engine, since the format contents
/// depend sensitively on the engine internals.
fn input_open_format(
&mut self,
name: &OsStr,
status: &mut dyn StatusBackend,
) -> OpenResult<InputHandle> {
self.input_open_name(name, status)
}
/// Save an a format dump in some way that this provider may be able to
/// recover in the future. This awkward interface is needed for to write
/// formats with their special munged file names.
fn write_format(
&mut self,
_name: &str,
_data: &[u8],
_status: &mut dyn StatusBackend,
) -> Result<()> {
Err(ErrorKind::Msg("this I/O layer cannot save format files".to_owned()).into())
}
}
impl<P: IoProvider + ?Sized> IoProvider for Box<P> {
fn output_open_name(&mut self, name: &OsStr) -> OpenResult<OutputHandle> {
(**self).output_open_name(name)
}
fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> {
(**self).output_open_stdout()
}
fn input_open_name(
&mut self,
name: &OsStr,
status: &mut dyn StatusBackend,
) -> OpenResult<InputHandle> {
(**self).input_open_name(name, status)
}
fn input_open_primary(&mut self, status: &mut dyn StatusBackend | w<T | identifier_name |
backend_helper.go | -csi-driver/client/apis/xuanwu/v1"
)
const (
ApiVersion = "v1"
XuanWuApiVersion = "xuanwu.huawei.io/v1"
KindSecret = "Secret"
KindConfigMap = "ConfigMap"
KindStorageBackendClaim = "StorageBackendClaim"
YamlSeparator = "---"
)
// BackendConfiguration backend config
type BackendConfiguration struct {
Name string `json:"name,omitempty" yaml:"name"`
NameSpace string `json:"namespace,omitempty" yaml:"namespace"`
Storage string `json:"storage,omitempty" yaml:"storage"`
VstoreName string `json:"vstoreName,omitempty" yaml:"vstoreName"`
AccountName string `json:"accountName,omitempty" yaml:"accountName"`
Urls []string `json:"urls,omitempty" yaml:"urls"`
Pools []string `json:"pools,omitempty" yaml:"pools"`
MetrovStorePairID string `json:"metrovStorePairID,omitempty" yaml:"metrovStorePairID"`
MetroBackend string `json:"metroBackend,omitempty" yaml:"metroBackend"`
SupportedTopologies []map[string]interface{} `json:"supportedTopologies,omitempty" yaml:"supportedTopologies"`
MaxClientThreads string `json:"maxClientThreads,omitempty" yaml:"maxClientThreads"`
Configured bool `json:"-" yaml:"configured"`
Provisioner string `json:"provisioner,omitempty" yaml:"provisioner"`
Parameters struct {
Protocol string `json:"protocol,omitempty" yaml:"protocol"`
ParentName string `json:"parentname" yaml:"parentname"`
Portals []string `json:"portals,omitempty" yaml:"portals"`
Alua []map[string][]map[string]interface{} `json:"ALUA,omitempty" yaml:"ALUA"`
} `json:"parameters,omitempty" yaml:"parameters"`
}
// BackendShowWide the content echoed by executing the oceanctl get backend -o wide
type BackendShowWide struct {
Namespace string `show:"NAMESPACE"`
Name string `show:"NAME"`
Protocol string `show:"PROTOCOL"`
StorageType string `show:"STORAGETYPE"`
Sn string `show:"SN"`
Status string `show:"STATUS"`
Online string `show:"ONLINE"`
Url string `show:"Url"`
VendorName string `show:"VENDORNAME"`
StorageBackendContentName string `show:"STORAGEBACKENDCONTENTNAME"`
}
// BackendShow the content echoed by executing the oceanctl get backend
type BackendShow struct {
Namespace string `show:"NAMESPACE"`
Name string `show:"NAME"`
Protocol string `show:"PROTOCOL"`
StorageType string `show:"STORAGETYPE"`
Sn string `show:"SN"`
Status string `show:"STATUS"`
Online string `show:"ONLINE"`
Url string `show:"Url"`
}
// BackendConfigShow the content echoed by executing the oceanctl create backend
type BackendConfigShow struct {
Number string `show:"NUMBER"`
Configured string `show:"CONFIGURED"`
Name string `show:"NAME"`
Storage string `show:"STORAGE"`
Urls string `show:"URLS"`
}
// StorageBackendClaimConfig used to create a storageBackendClaim object
type StorageBackendClaimConfig struct {
Name string
Namespace string
ConfigmapMeta string
SecretMeta string
MaxClientThreads string
Provisioner string
}
// SecretConfig used to create a secret object
type SecretConfig struct {
Name string
Namespace string
User string
Pwd string
}
// ConfigMapConfig used to create a configmap object
type ConfigMapConfig struct {
Name string
Namespace string
JsonData string
}
// ShowWithContentOption set StorageBackendContent value for BackendShowWide
func (b *BackendShowWide) ShowWithContentOption(content xuanwuv1.StorageBackendContent) *BackendShowWide {
b.StorageBackendContentName = content.Name
if content.Status != nil {
b.Online = strconv.FormatBool(content.Status.Online)
b.VendorName = content.Status.VendorName
b.Sn = content.Status.SN
}
return b
}
// ShowWithConfigOption set BackendConfiguration value for BackendShowWide
func (b *BackendShowWide) ShowWithConfigOption(configuration BackendConfiguration) *BackendShowWide {
b.Url = strings.Join(configuration.Urls, "\n")
return b
}
// ShowWithClaimOption set StorageBackendClaim value for BackendShowWide
func (b *BackendShowWide) | (claim xuanwuv1.StorageBackendClaim) *BackendShowWide {
b.Namespace = claim.Namespace
b.Name = claim.Name
if claim.Status != nil {
b.StorageType = claim.Status.StorageType
b.Protocol = claim.Status.Protocol
b.Status = string(claim.Status.Phase)
}
return b
}
// ToBackendShow convert BackendShowWide to BackendShow
func (b *BackendShowWide) ToBackendShow() BackendShow {
return BackendShow{
Namespace: b.Namespace,
Name: b.Name,
Protocol: b.Protocol,
StorageType: b.StorageType,
Sn: b.Sn,
Status: b.Status,
Online: b.Online,
Url: b.Url,
}
}
// ToStorageBackendClaimConfig covert backend to StorageBackendClaimConfig
func (b *BackendConfiguration) ToStorageBackendClaimConfig() StorageBackendClaimConfig {
return StorageBackendClaimConfig{
Name: b.Name,
Namespace: b.NameSpace,
ConfigmapMeta: k8string.JoinQualifiedName(b.NameSpace, b.Name),
SecretMeta: k8string.JoinQualifiedName(b.NameSpace, b.Name),
MaxClientThreads: b.MaxClientThreads,
Provisioner: b.Provisioner,
}
}
// ToConfigMapConfig convert backend to helper.ConfigMapConfig
func (b *BackendConfiguration) ToConfigMapConfig() (ConfigMapConfig, error) {
config := struct {
Backends BackendConfiguration `json:"backends"`
}{*b}
output, err := json.MarshalIndent(&config, "", " ")
if err != nil {
return ConfigMapConfig{}, helper.LogErrorf(" json.MarshalIndent failed: %v", err)
}
return ConfigMapConfig{
Name: b.Name,
Namespace: b.NameSpace,
JsonData: string(output),
}, nil
}
// ToSecretConfig convert backend to helper.SecretConfig
// If start stdin failed, an error will return.
func (b *BackendConfiguration) ToSecretConfig() (SecretConfig, error) {
userName, password, err := helper.StartStdInput()
if err != nil {
return SecretConfig{}, err
}
return SecretConfig{
Name: b.Name,
Namespace: b.NameSpace,
User: userName,
Pwd: password,
}, nil
}
// ToConfigMap convert ConfigMapConfig to ConfigMap resource
func (c *ConfigMapConfig) ToConfigMap() corev1.ConfigMap {
return corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
APIVersion: ApiVersion,
Kind: KindConfigMap,
},
ObjectMeta: metav1.ObjectMeta{
Name: c.Name,
Namespace: c.Namespace,
},
Data: map[string]string{
"csi.json": c.JsonData,
},
}
}
// ToSecret convert SecretConfig to Secret resource
func (c *SecretConfig) ToSecret() corev1.Secret {
return corev1.Secret{
TypeMeta: metav1.TypeMeta{
APIVersion: ApiVersion,
Kind: KindSecret,
},
ObjectMeta: metav1.ObjectMeta{
Name: c.Name,
Namespace: c.Namespace,
},
StringData: map[string]string{
"password": c.Pwd,
"user": c.User,
},
Type: "Opaque",
}
}
// ToStorageBackendClaim convert StorageBackendClaimConfig to Secret StorageBackendClaim
func (c *StorageBackendClaimConfig) ToStorageBackendClaim() xuanwuv1.StorageBackendClaim {
return xuanwuv1.StorageBackendClaim{
TypeMeta: metav1.TypeMeta{
APIVersion: XuanWuApiVersion,
Kind: KindStorageBackendClaim,
},
ObjectMeta: metav1.ObjectMeta{
Name: c.Name,
Namespace: c.Namespace,
},
Spec: xuanwuv1.StorageBackendClaimSpec{
Provider: c.Provisioner,
ConfigMapMeta: c.ConfigmapMeta,
SecretMeta: c.SecretMeta,
MaxClientThreads: c.MaxClientThreads,
},
}
}
// LoadBackendsFromJson load backend from json bytes
func LoadBackendsFromJson(jsonData []byte) (map[string]*BackendConfiguration, error) {
result := make(map[string]*BackendConfiguration)
configmap := corev1.ConfigMap{}
err := json.Unmarshal(jsonData, &configmap)
if err != nil {
return result, err
}
return LoadBackendsFromConfigMap(configmap)
}
// LoadBackendsFromConfigMap load backend from configmap resource
func LoadBackendsFromConfigMap(configmap corev1.ConfigMap) (map[string]*BackendConfiguration, error) {
result := | ShowWithClaimOption | identifier_name |
backend_helper.go | "`
NameSpace string `json:"namespace,omitempty" yaml:"namespace"`
Storage string `json:"storage,omitempty" yaml:"storage"`
VstoreName string `json:"vstoreName,omitempty" yaml:"vstoreName"`
AccountName string `json:"accountName,omitempty" yaml:"accountName"`
Urls []string `json:"urls,omitempty" yaml:"urls"`
Pools []string `json:"pools,omitempty" yaml:"pools"`
MetrovStorePairID string `json:"metrovStorePairID,omitempty" yaml:"metrovStorePairID"`
MetroBackend string `json:"metroBackend,omitempty" yaml:"metroBackend"`
SupportedTopologies []map[string]interface{} `json:"supportedTopologies,omitempty" yaml:"supportedTopologies"`
MaxClientThreads string `json:"maxClientThreads,omitempty" yaml:"maxClientThreads"`
Configured bool `json:"-" yaml:"configured"`
Provisioner string `json:"provisioner,omitempty" yaml:"provisioner"`
Parameters struct {
Protocol string `json:"protocol,omitempty" yaml:"protocol"`
ParentName string `json:"parentname" yaml:"parentname"`
Portals []string `json:"portals,omitempty" yaml:"portals"`
Alua []map[string][]map[string]interface{} `json:"ALUA,omitempty" yaml:"ALUA"`
} `json:"parameters,omitempty" yaml:"parameters"`
}
// BackendShowWide the content echoed by executing the oceanctl get backend -o wide
type BackendShowWide struct {
Namespace string `show:"NAMESPACE"`
Name string `show:"NAME"`
Protocol string `show:"PROTOCOL"`
StorageType string `show:"STORAGETYPE"`
Sn string `show:"SN"`
Status string `show:"STATUS"`
Online string `show:"ONLINE"`
Url string `show:"Url"`
VendorName string `show:"VENDORNAME"`
StorageBackendContentName string `show:"STORAGEBACKENDCONTENTNAME"`
}
// BackendShow the content echoed by executing the oceanctl get backend
type BackendShow struct {
Namespace string `show:"NAMESPACE"`
Name string `show:"NAME"`
Protocol string `show:"PROTOCOL"`
StorageType string `show:"STORAGETYPE"`
Sn string `show:"SN"`
Status string `show:"STATUS"`
Online string `show:"ONLINE"`
Url string `show:"Url"`
}
// BackendConfigShow the content echoed by executing the oceanctl create backend
type BackendConfigShow struct {
Number string `show:"NUMBER"`
Configured string `show:"CONFIGURED"`
Name string `show:"NAME"`
Storage string `show:"STORAGE"`
Urls string `show:"URLS"`
}
// StorageBackendClaimConfig used to create a storageBackendClaim object
type StorageBackendClaimConfig struct {
Name string
Namespace string
ConfigmapMeta string
SecretMeta string
MaxClientThreads string
Provisioner string
}
// SecretConfig used to create a secret object
type SecretConfig struct {
Name string
Namespace string
User string
Pwd string
}
// ConfigMapConfig used to create a configmap object
type ConfigMapConfig struct {
Name string
Namespace string
JsonData string
}
// ShowWithContentOption set StorageBackendContent value for BackendShowWide
func (b *BackendShowWide) ShowWithContentOption(content xuanwuv1.StorageBackendContent) *BackendShowWide {
b.StorageBackendContentName = content.Name
if content.Status != nil {
b.Online = strconv.FormatBool(content.Status.Online)
b.VendorName = content.Status.VendorName
b.Sn = content.Status.SN
}
return b
}
// ShowWithConfigOption set BackendConfiguration value for BackendShowWide
func (b *BackendShowWide) ShowWithConfigOption(configuration BackendConfiguration) *BackendShowWide {
b.Url = strings.Join(configuration.Urls, "\n")
return b
}
// ShowWithClaimOption set StorageBackendClaim value for BackendShowWide
func (b *BackendShowWide) ShowWithClaimOption(claim xuanwuv1.StorageBackendClaim) *BackendShowWide {
b.Namespace = claim.Namespace
b.Name = claim.Name
if claim.Status != nil {
b.StorageType = claim.Status.StorageType
b.Protocol = claim.Status.Protocol
b.Status = string(claim.Status.Phase)
}
return b
}
// ToBackendShow convert BackendShowWide to BackendShow
func (b *BackendShowWide) ToBackendShow() BackendShow {
return BackendShow{
Namespace: b.Namespace,
Name: b.Name,
Protocol: b.Protocol,
StorageType: b.StorageType,
Sn: b.Sn,
Status: b.Status,
Online: b.Online,
Url: b.Url,
}
}
// ToStorageBackendClaimConfig covert backend to StorageBackendClaimConfig
func (b *BackendConfiguration) ToStorageBackendClaimConfig() StorageBackendClaimConfig {
return StorageBackendClaimConfig{
Name: b.Name,
Namespace: b.NameSpace,
ConfigmapMeta: k8string.JoinQualifiedName(b.NameSpace, b.Name),
SecretMeta: k8string.JoinQualifiedName(b.NameSpace, b.Name),
MaxClientThreads: b.MaxClientThreads,
Provisioner: b.Provisioner,
}
}
// ToConfigMapConfig convert backend to helper.ConfigMapConfig
func (b *BackendConfiguration) ToConfigMapConfig() (ConfigMapConfig, error) {
config := struct {
Backends BackendConfiguration `json:"backends"`
}{*b}
output, err := json.MarshalIndent(&config, "", " ")
if err != nil {
return ConfigMapConfig{}, helper.LogErrorf(" json.MarshalIndent failed: %v", err)
}
return ConfigMapConfig{
Name: b.Name,
Namespace: b.NameSpace,
JsonData: string(output),
}, nil
}
// ToSecretConfig convert backend to helper.SecretConfig
// If start stdin failed, an error will return.
func (b *BackendConfiguration) ToSecretConfig() (SecretConfig, error) {
userName, password, err := helper.StartStdInput()
if err != nil {
return SecretConfig{}, err
}
return SecretConfig{
Name: b.Name,
Namespace: b.NameSpace,
User: userName,
Pwd: password,
}, nil
}
// ToConfigMap convert ConfigMapConfig to ConfigMap resource
func (c *ConfigMapConfig) ToConfigMap() corev1.ConfigMap {
return corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
APIVersion: ApiVersion,
Kind: KindConfigMap,
},
ObjectMeta: metav1.ObjectMeta{
Name: c.Name,
Namespace: c.Namespace,
},
Data: map[string]string{
"csi.json": c.JsonData,
},
}
}
// ToSecret convert SecretConfig to Secret resource
func (c *SecretConfig) ToSecret() corev1.Secret {
return corev1.Secret{
TypeMeta: metav1.TypeMeta{
APIVersion: ApiVersion,
Kind: KindSecret,
},
ObjectMeta: metav1.ObjectMeta{
Name: c.Name,
Namespace: c.Namespace,
},
StringData: map[string]string{
"password": c.Pwd,
"user": c.User,
},
Type: "Opaque",
}
}
// ToStorageBackendClaim convert StorageBackendClaimConfig to Secret StorageBackendClaim
func (c *StorageBackendClaimConfig) ToStorageBackendClaim() xuanwuv1.StorageBackendClaim {
return xuanwuv1.StorageBackendClaim{
TypeMeta: metav1.TypeMeta{
APIVersion: XuanWuApiVersion,
Kind: KindStorageBackendClaim,
},
ObjectMeta: metav1.ObjectMeta{
Name: c.Name,
Namespace: c.Namespace,
},
Spec: xuanwuv1.StorageBackendClaimSpec{
Provider: c.Provisioner,
ConfigMapMeta: c.ConfigmapMeta,
SecretMeta: c.SecretMeta,
MaxClientThreads: c.MaxClientThreads,
},
}
}
// LoadBackendsFromJson load backend from json bytes
func LoadBackendsFromJson(jsonData []byte) (map[string]*BackendConfiguration, error) {
result := make(map[string]*BackendConfiguration)
configmap := corev1.ConfigMap{}
err := json.Unmarshal(jsonData, &configmap)
if err != nil {
return result, err
}
return LoadBackendsFromConfigMap(configmap)
}
// LoadBackendsFromConfigMap load backend from configmap resource
func LoadBackendsFromConfigMap(configmap corev1.ConfigMap) (map[string]*BackendConfiguration, error) {
result := make(map[string]*BackendConfiguration)
jsonStr, ok := configmap.Data["csi.json"]
if !ok {
return result, errors.New("not found csi.json config")
}
backendContent, err := AnalyseBackendExist(jsonStr)
if err != nil {
return nil, err
}
var backends []*BackendConfiguration
if _, ok = backendContent.([]interface{}); ok | {
backends, err = LoadMultipleBackendFromConfigmap(jsonStr)
} | conditional_block | |
backend_helper.go | by executing the oceanctl get backend -o wide
type BackendShowWide struct {
Namespace string `show:"NAMESPACE"`
Name string `show:"NAME"`
Protocol string `show:"PROTOCOL"`
StorageType string `show:"STORAGETYPE"`
Sn string `show:"SN"`
Status string `show:"STATUS"`
Online string `show:"ONLINE"`
Url string `show:"Url"`
VendorName string `show:"VENDORNAME"`
StorageBackendContentName string `show:"STORAGEBACKENDCONTENTNAME"`
}
// BackendShow the content echoed by executing the oceanctl get backend
type BackendShow struct {
Namespace string `show:"NAMESPACE"`
Name string `show:"NAME"`
Protocol string `show:"PROTOCOL"`
StorageType string `show:"STORAGETYPE"`
Sn string `show:"SN"`
Status string `show:"STATUS"`
Online string `show:"ONLINE"`
Url string `show:"Url"`
}
// BackendConfigShow the content echoed by executing the oceanctl create backend
type BackendConfigShow struct {
Number string `show:"NUMBER"`
Configured string `show:"CONFIGURED"`
Name string `show:"NAME"`
Storage string `show:"STORAGE"`
Urls string `show:"URLS"`
}
// StorageBackendClaimConfig used to create a storageBackendClaim object
type StorageBackendClaimConfig struct {
Name string
Namespace string
ConfigmapMeta string
SecretMeta string
MaxClientThreads string
Provisioner string
}
// SecretConfig used to create a secret object
type SecretConfig struct {
Name string
Namespace string
User string
Pwd string
}
// ConfigMapConfig used to create a configmap object
type ConfigMapConfig struct {
Name string
Namespace string
JsonData string
}
// ShowWithContentOption set StorageBackendContent value for BackendShowWide
func (b *BackendShowWide) ShowWithContentOption(content xuanwuv1.StorageBackendContent) *BackendShowWide {
b.StorageBackendContentName = content.Name
if content.Status != nil {
b.Online = strconv.FormatBool(content.Status.Online)
b.VendorName = content.Status.VendorName
b.Sn = content.Status.SN
}
return b
}
// ShowWithConfigOption set BackendConfiguration value for BackendShowWide
func (b *BackendShowWide) ShowWithConfigOption(configuration BackendConfiguration) *BackendShowWide {
b.Url = strings.Join(configuration.Urls, "\n")
return b
}
// ShowWithClaimOption set StorageBackendClaim value for BackendShowWide
func (b *BackendShowWide) ShowWithClaimOption(claim xuanwuv1.StorageBackendClaim) *BackendShowWide {
b.Namespace = claim.Namespace
b.Name = claim.Name
if claim.Status != nil {
b.StorageType = claim.Status.StorageType
b.Protocol = claim.Status.Protocol
b.Status = string(claim.Status.Phase)
}
return b
}
// ToBackendShow convert BackendShowWide to BackendShow
func (b *BackendShowWide) ToBackendShow() BackendShow {
return BackendShow{
Namespace: b.Namespace,
Name: b.Name,
Protocol: b.Protocol,
StorageType: b.StorageType,
Sn: b.Sn,
Status: b.Status,
Online: b.Online,
Url: b.Url,
}
}
// ToStorageBackendClaimConfig covert backend to StorageBackendClaimConfig
func (b *BackendConfiguration) ToStorageBackendClaimConfig() StorageBackendClaimConfig {
return StorageBackendClaimConfig{
Name: b.Name,
Namespace: b.NameSpace,
ConfigmapMeta: k8string.JoinQualifiedName(b.NameSpace, b.Name),
SecretMeta: k8string.JoinQualifiedName(b.NameSpace, b.Name),
MaxClientThreads: b.MaxClientThreads,
Provisioner: b.Provisioner,
}
}
// ToConfigMapConfig convert backend to helper.ConfigMapConfig
func (b *BackendConfiguration) ToConfigMapConfig() (ConfigMapConfig, error) {
config := struct {
Backends BackendConfiguration `json:"backends"`
}{*b}
output, err := json.MarshalIndent(&config, "", " ")
if err != nil {
return ConfigMapConfig{}, helper.LogErrorf(" json.MarshalIndent failed: %v", err)
}
return ConfigMapConfig{
Name: b.Name,
Namespace: b.NameSpace,
JsonData: string(output),
}, nil
}
// ToSecretConfig convert backend to helper.SecretConfig
// If start stdin failed, an error will return.
func (b *BackendConfiguration) ToSecretConfig() (SecretConfig, error) {
userName, password, err := helper.StartStdInput()
if err != nil {
return SecretConfig{}, err
}
return SecretConfig{
Name: b.Name,
Namespace: b.NameSpace,
User: userName,
Pwd: password,
}, nil
}
// ToConfigMap convert ConfigMapConfig to ConfigMap resource
func (c *ConfigMapConfig) ToConfigMap() corev1.ConfigMap {
return corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
APIVersion: ApiVersion,
Kind: KindConfigMap,
},
ObjectMeta: metav1.ObjectMeta{
Name: c.Name,
Namespace: c.Namespace,
},
Data: map[string]string{
"csi.json": c.JsonData,
},
}
}
// ToSecret convert SecretConfig to Secret resource
func (c *SecretConfig) ToSecret() corev1.Secret {
return corev1.Secret{
TypeMeta: metav1.TypeMeta{
APIVersion: ApiVersion,
Kind: KindSecret,
},
ObjectMeta: metav1.ObjectMeta{
Name: c.Name,
Namespace: c.Namespace,
},
StringData: map[string]string{
"password": c.Pwd,
"user": c.User,
},
Type: "Opaque",
}
}
// ToStorageBackendClaim convert StorageBackendClaimConfig to Secret StorageBackendClaim
func (c *StorageBackendClaimConfig) ToStorageBackendClaim() xuanwuv1.StorageBackendClaim {
return xuanwuv1.StorageBackendClaim{
TypeMeta: metav1.TypeMeta{
APIVersion: XuanWuApiVersion,
Kind: KindStorageBackendClaim,
},
ObjectMeta: metav1.ObjectMeta{
Name: c.Name,
Namespace: c.Namespace,
},
Spec: xuanwuv1.StorageBackendClaimSpec{
Provider: c.Provisioner,
ConfigMapMeta: c.ConfigmapMeta,
SecretMeta: c.SecretMeta,
MaxClientThreads: c.MaxClientThreads,
},
}
}
// LoadBackendsFromJson load backend from json bytes
func LoadBackendsFromJson(jsonData []byte) (map[string]*BackendConfiguration, error) {
result := make(map[string]*BackendConfiguration)
configmap := corev1.ConfigMap{}
err := json.Unmarshal(jsonData, &configmap)
if err != nil {
return result, err
}
return LoadBackendsFromConfigMap(configmap)
}
// LoadBackendsFromConfigMap load backend from configmap resource
func LoadBackendsFromConfigMap(configmap corev1.ConfigMap) (map[string]*BackendConfiguration, error) {
result := make(map[string]*BackendConfiguration)
jsonStr, ok := configmap.Data["csi.json"]
if !ok {
return result, errors.New("not found csi.json config")
}
backendContent, err := AnalyseBackendExist(jsonStr)
if err != nil {
return nil, err
}
var backends []*BackendConfiguration
if _, ok = backendContent.([]interface{}); ok {
backends, err = LoadMultipleBackendFromConfigmap(jsonStr)
} else {
backends, err = LoadSingleBackendFromConfigmap(jsonStr)
}
if err != nil {
return nil, err
}
for _, backend := range backends {
result[backend.Name] = backend
}
return result, nil
}
//AnalyseBackendExist analyse backend,an error is returned if backends not exist
func AnalyseBackendExist(jsonStr string) (interface{}, error) {
var config map[string]interface{}
if err := json.Unmarshal([]byte(jsonStr), &config); err != nil {
return nil, err
}
backendContent, ok := config["backends"]
if !ok {
return nil, errors.New("not found backends config")
}
return backendContent, nil
}
// LoadSingleBackendFromConfigmap load single backend
func LoadSingleBackendFromConfigmap(jsonStr string) ([]*BackendConfiguration, error) {
config := struct {
Backends *BackendConfiguration `json:"backends"`
}{}
if err := json.Unmarshal([]byte(jsonStr), &config); err != nil {
return nil, err
}
return []*BackendConfiguration{config.Backends}, nil
}
// LoadMultipleBackendFromConfigmap load multiple backend
func LoadMultipleBackendFromConfigmap(jsonStr string) ([]*BackendConfiguration, error) | {
config := struct {
Backends []*BackendConfiguration `json:"backends"`
}{}
if err := json.Unmarshal([]byte(jsonStr), &config); err != nil {
return nil, err
}
return config.Backends, nil
} | identifier_body | |
backend_helper.go | awei-csi-driver/client/apis/xuanwu/v1"
)
const (
ApiVersion = "v1"
XuanWuApiVersion = "xuanwu.huawei.io/v1"
KindSecret = "Secret"
KindConfigMap = "ConfigMap"
KindStorageBackendClaim = "StorageBackendClaim"
YamlSeparator = "---"
)
// BackendConfiguration backend config
type BackendConfiguration struct {
Name string `json:"name,omitempty" yaml:"name"`
NameSpace string `json:"namespace,omitempty" yaml:"namespace"`
Storage string `json:"storage,omitempty" yaml:"storage"`
VstoreName string `json:"vstoreName,omitempty" yaml:"vstoreName"`
AccountName string `json:"accountName,omitempty" yaml:"accountName"`
Urls []string `json:"urls,omitempty" yaml:"urls"`
Pools []string `json:"pools,omitempty" yaml:"pools"`
MetrovStorePairID string `json:"metrovStorePairID,omitempty" yaml:"metrovStorePairID"`
MetroBackend string `json:"metroBackend,omitempty" yaml:"metroBackend"`
SupportedTopologies []map[string]interface{} `json:"supportedTopologies,omitempty" yaml:"supportedTopologies"`
MaxClientThreads string `json:"maxClientThreads,omitempty" yaml:"maxClientThreads"`
Configured bool `json:"-" yaml:"configured"`
Provisioner string `json:"provisioner,omitempty" yaml:"provisioner"`
Parameters struct {
Protocol string `json:"protocol,omitempty" yaml:"protocol"`
ParentName string `json:"parentname" yaml:"parentname"`
Portals []string `json:"portals,omitempty" yaml:"portals"`
Alua []map[string][]map[string]interface{} `json:"ALUA,omitempty" yaml:"ALUA"`
} `json:"parameters,omitempty" yaml:"parameters"`
}
// BackendShowWide the content echoed by executing the oceanctl get backend -o wide
type BackendShowWide struct {
Namespace string `show:"NAMESPACE"`
Name string `show:"NAME"`
Protocol string `show:"PROTOCOL"`
StorageType string `show:"STORAGETYPE"`
Sn string `show:"SN"`
Status string `show:"STATUS"`
Online string `show:"ONLINE"` | }
// BackendShow the content echoed by executing the oceanctl get backend
type BackendShow struct {
Namespace string `show:"NAMESPACE"`
Name string `show:"NAME"`
Protocol string `show:"PROTOCOL"`
StorageType string `show:"STORAGETYPE"`
Sn string `show:"SN"`
Status string `show:"STATUS"`
Online string `show:"ONLINE"`
Url string `show:"Url"`
}
// BackendConfigShow the content echoed by executing the oceanctl create backend
type BackendConfigShow struct {
Number string `show:"NUMBER"`
Configured string `show:"CONFIGURED"`
Name string `show:"NAME"`
Storage string `show:"STORAGE"`
Urls string `show:"URLS"`
}
// StorageBackendClaimConfig used to create a storageBackendClaim object
type StorageBackendClaimConfig struct {
Name string
Namespace string
ConfigmapMeta string
SecretMeta string
MaxClientThreads string
Provisioner string
}
// SecretConfig used to create a secret object
type SecretConfig struct {
Name string
Namespace string
User string
Pwd string
}
// ConfigMapConfig used to create a configmap object
type ConfigMapConfig struct {
Name string
Namespace string
JsonData string
}
// ShowWithContentOption set StorageBackendContent value for BackendShowWide
func (b *BackendShowWide) ShowWithContentOption(content xuanwuv1.StorageBackendContent) *BackendShowWide {
b.StorageBackendContentName = content.Name
if content.Status != nil {
b.Online = strconv.FormatBool(content.Status.Online)
b.VendorName = content.Status.VendorName
b.Sn = content.Status.SN
}
return b
}
// ShowWithConfigOption set BackendConfiguration value for BackendShowWide
func (b *BackendShowWide) ShowWithConfigOption(configuration BackendConfiguration) *BackendShowWide {
b.Url = strings.Join(configuration.Urls, "\n")
return b
}
// ShowWithClaimOption set StorageBackendClaim value for BackendShowWide
func (b *BackendShowWide) ShowWithClaimOption(claim xuanwuv1.StorageBackendClaim) *BackendShowWide {
b.Namespace = claim.Namespace
b.Name = claim.Name
if claim.Status != nil {
b.StorageType = claim.Status.StorageType
b.Protocol = claim.Status.Protocol
b.Status = string(claim.Status.Phase)
}
return b
}
// ToBackendShow convert BackendShowWide to BackendShow
func (b *BackendShowWide) ToBackendShow() BackendShow {
return BackendShow{
Namespace: b.Namespace,
Name: b.Name,
Protocol: b.Protocol,
StorageType: b.StorageType,
Sn: b.Sn,
Status: b.Status,
Online: b.Online,
Url: b.Url,
}
}
// ToStorageBackendClaimConfig covert backend to StorageBackendClaimConfig
func (b *BackendConfiguration) ToStorageBackendClaimConfig() StorageBackendClaimConfig {
return StorageBackendClaimConfig{
Name: b.Name,
Namespace: b.NameSpace,
ConfigmapMeta: k8string.JoinQualifiedName(b.NameSpace, b.Name),
SecretMeta: k8string.JoinQualifiedName(b.NameSpace, b.Name),
MaxClientThreads: b.MaxClientThreads,
Provisioner: b.Provisioner,
}
}
// ToConfigMapConfig convert backend to helper.ConfigMapConfig
func (b *BackendConfiguration) ToConfigMapConfig() (ConfigMapConfig, error) {
config := struct {
Backends BackendConfiguration `json:"backends"`
}{*b}
output, err := json.MarshalIndent(&config, "", " ")
if err != nil {
return ConfigMapConfig{}, helper.LogErrorf(" json.MarshalIndent failed: %v", err)
}
return ConfigMapConfig{
Name: b.Name,
Namespace: b.NameSpace,
JsonData: string(output),
}, nil
}
// ToSecretConfig convert backend to helper.SecretConfig
// If start stdin failed, an error will return.
func (b *BackendConfiguration) ToSecretConfig() (SecretConfig, error) {
userName, password, err := helper.StartStdInput()
if err != nil {
return SecretConfig{}, err
}
return SecretConfig{
Name: b.Name,
Namespace: b.NameSpace,
User: userName,
Pwd: password,
}, nil
}
// ToConfigMap convert ConfigMapConfig to ConfigMap resource
func (c *ConfigMapConfig) ToConfigMap() corev1.ConfigMap {
return corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
APIVersion: ApiVersion,
Kind: KindConfigMap,
},
ObjectMeta: metav1.ObjectMeta{
Name: c.Name,
Namespace: c.Namespace,
},
Data: map[string]string{
"csi.json": c.JsonData,
},
}
}
// ToSecret convert SecretConfig to Secret resource
func (c *SecretConfig) ToSecret() corev1.Secret {
return corev1.Secret{
TypeMeta: metav1.TypeMeta{
APIVersion: ApiVersion,
Kind: KindSecret,
},
ObjectMeta: metav1.ObjectMeta{
Name: c.Name,
Namespace: c.Namespace,
},
StringData: map[string]string{
"password": c.Pwd,
"user": c.User,
},
Type: "Opaque",
}
}
// ToStorageBackendClaim convert StorageBackendClaimConfig to Secret StorageBackendClaim
func (c *StorageBackendClaimConfig) ToStorageBackendClaim() xuanwuv1.StorageBackendClaim {
return xuanwuv1.StorageBackendClaim{
TypeMeta: metav1.TypeMeta{
APIVersion: XuanWuApiVersion,
Kind: KindStorageBackendClaim,
},
ObjectMeta: metav1.ObjectMeta{
Name: c.Name,
Namespace: c.Namespace,
},
Spec: xuanwuv1.StorageBackendClaimSpec{
Provider: c.Provisioner,
ConfigMapMeta: c.ConfigmapMeta,
SecretMeta: c.SecretMeta,
MaxClientThreads: c.MaxClientThreads,
},
}
}
// LoadBackendsFromJson load backend from json bytes
func LoadBackendsFromJson(jsonData []byte) (map[string]*BackendConfiguration, error) {
result := make(map[string]*BackendConfiguration)
configmap := corev1.ConfigMap{}
err := json.Unmarshal(jsonData, &configmap)
if err != nil {
return result, err
}
return LoadBackendsFromConfigMap(configmap)
}
// LoadBackendsFromConfigMap load backend from configmap resource
func LoadBackendsFromConfigMap(configmap corev1.ConfigMap) (map[string]*BackendConfiguration, error) {
result := | Url string `show:"Url"`
VendorName string `show:"VENDORNAME"`
StorageBackendContentName string `show:"STORAGEBACKENDCONTENTNAME"` | random_line_split |
drawing_support.py | :param Color color: Three or four byte RGBA color
:param float transparency: Transparency
"""
return color[0], color[1], color[2], transparency
def rotate_point(x: float, y: float, cx: float, cy: float,
angle_degrees: float) -> List[float]:
"""
Rotate a point around a center.
:param x: x value of the point you want to rotate
:param y: y value of the point you want to rotate
:param cx: x value of the center point you want to rotate around
:param cy: y value of the center point you want to rotate around
:param angle_degrees: Angle, in degrees, to rotate
:return: Return rotated (x, y) pair
:rtype: (float, float)
"""
temp_x = x - cx
temp_y = y - cy
# now apply rotation
angle_radians = math.radians(angle_degrees)
cos_angle = math.cos(angle_radians)
sin_angle = math.sin(angle_radians)
rotated_x = temp_x * cos_angle - temp_y * sin_angle
rotated_y = temp_x * sin_angle + temp_y * cos_angle
# translate back
rounding_precision = 2
x = round(rotated_x + cx, rounding_precision)
y = round(rotated_y + cy, rounding_precision)
return [x, y]
def calculate_hit_box_points_simple(image):
"""
Given an image, this returns points that make up a hit box around it. Attempts
to trim out transparent pixels.
:param Image image:
:Returns: List of points
"""
left_border = 0
good = True
while good and left_border < image.width:
for row in range(image.height):
pos = (left_border, row)
pixel = image.getpixel(pos)
if type(pixel) is int or len(pixel) != 4:
raise TypeError("Error, calculate_points called on image not in RGBA format")
else:
if pixel[3] != 0:
good = False
break
if good:
left_border += 1
right_border = image.width - 1
good = True
while good and right_border > 0:
for row in range(image.height):
pos = (right_border, row)
pixel = image.getpixel(pos)
if pixel[3] != 0:
good = False
break
if good:
right_border -= 1
top_border = 0
good = True
while good and top_border < image.height:
for column in range(image.width):
pos = (column, top_border)
pixel = image.getpixel(pos)
if pixel[3] != 0:
good = False
break
if good:
top_border += 1
bottom_border = image.height - 1
good = True
while good and bottom_border > 0:
for column in range(image.width):
pos = (column, bottom_border)
pixel = image.getpixel(pos)
if pixel[3] != 0:
good = False
break
if good:
bottom_border -= 1
# If the image is empty, return an empty set
if bottom_border == 0:
return []
def _check_corner_offset(start_x, start_y, x_direction, y_direction):
bad = False
offset = 0
while not bad:
y = start_y + (offset * y_direction)
x = start_x
for count in range(offset + 1):
my_pixel = image.getpixel((x, y))
# print(f"({x}, {y}) = {pixel} | ", end="")
if my_pixel[3] != 0:
bad = True
break
y -= y_direction
x += x_direction
# print(f" - {bad}")
if not bad:
offset += 1
# print(f"offset: {offset}")
return offset
def _r(point, height, width):
return point[0] - width / 2, (height - point[1]) - height / 2
top_left_corner_offset = _check_corner_offset(left_border, top_border, 1, 1)
top_right_corner_offset = _check_corner_offset(right_border, top_border, -1, 1)
bottom_left_corner_offset = _check_corner_offset(left_border, bottom_border, 1, -1)
bottom_right_corner_offset = _check_corner_offset(right_border, bottom_border, -1, -1)
p1 = left_border + top_left_corner_offset, top_border
p2 = (right_border + 1) - top_right_corner_offset, top_border
p3 = (right_border + 1), top_border + top_right_corner_offset
p4 = (right_border + 1), (bottom_border + 1) - bottom_right_corner_offset
p5 = (right_border + 1) - bottom_right_corner_offset, (bottom_border + 1)
p6 = left_border + bottom_left_corner_offset, (bottom_border + 1)
p7 = left_border, (bottom_border + 1) - bottom_left_corner_offset
p8 = left_border, top_border + top_left_corner_offset
result = []
h = image.height
w = image.width
result.append(_r(p7, h, w))
if bottom_left_corner_offset:
result.append(_r(p6, h, w))
result.append(_r(p5, h, w))
if bottom_right_corner_offset:
result.append(_r(p4, h, w))
result.append(_r(p3, h, w))
if top_right_corner_offset:
result.append(_r(p2, h, w))
result.append(_r(p1, h, w))
if top_left_corner_offset:
result.append(_r(p8, h, w))
# Remove duplicates
result = tuple(dict.fromkeys(result))
return result
def calculate_hit_box_points_detailed(image: Image, hit_box_detail: float = 4.5):
"""
Given an image, this returns points that make up a hit box around it. Attempts
to trim out transparent pixels.
:param Image image: Image get hit box from.
:param int hit_box_detail: How detailed to make the hit box. There's a
trade-off in number of points vs. accuracy.
:Returns: List of points
"""
def sample_func(sample_point):
""" Method used to sample image. """
if sample_point[0] < 0 \
or sample_point[1] < 0 \
or sample_point[0] >= image.width \
or sample_point[1] >= image.height:
return 0
point_tuple = sample_point[0], sample_point[1]
color = image.getpixel(point_tuple)
if color[3] > 0:
return 255
else:
return 0
# Do a quick check if it is a full tile
p1 = 0, 0
p2 = 0, image.height - 1
p3 = image.width - 1, image.height - 1
p4 = image.width - 1, 0
if sample_func(p1) and sample_func(p2) and sample_func(p3) and sample_func(p4):
# Do a quick check if it is a full tile
p1 = (-image.width / 2, -image.height / 2)
p2 = (image.width / 2, -image.height / 2)
p3 = (image.width / 2, image.height / 2)
p4 = (-image.width / 2, image.height / 2)
return p1, p2, p3, p4
# Get the bounding box
logo_bb = pymunk.BB(-1, -1, image.width, image.height)
# Set of lines that trace the image
line_set = pymunk.autogeometry.PolylineSet()
# How often to sample?
downres = 1
horizontal_samples = int(image.width / downres)
vertical_samples = int(image.height / downres)
# Run the trace
# Get back one or more sets of lines covering stuff.
line_sets = pymunk.autogeometry.march_soft(
logo_bb,
horizontal_samples, vertical_samples,
99,
sample_func)
if len(line_sets) == 0:
return []
selected_line_set = line_sets[0]
selected_range = None
if len(line_set) > 1:
# We have more than one line set. Try and find one that covers most of
# the sprite.
for line in line_set:
min_x = None
min_y = None
max_x = None
max_y = None
for point in line:
if min_x is None or point.x < min_x:
min_x = point.x
if max_x is None or point.x > max_x:
max_x = point.x
if min_y is None or point.y < min_y:
min_y = point.y
if max_y is None or point.y > max_y:
| max_y = point.y | conditional_block | |
drawing_support.py | points = (r1_x, r1_y), (r2_x, r2_y), (r4_x, r4_y), (r3_x, r3_y)
return points
def get_four_byte_color(color: Color) -> RGBA:
"""
Given a RGB list, it will return RGBA.
Given a RGBA list, it will return the same RGBA.
:param Color color: Three or four byte tuple
:returns: return: Four byte RGBA tuple
"""
if len(color) == 4:
return cast(RGBA, color)
elif len(color) == 3:
return color[0], color[1], color[2], 255
else:
raise ValueError("This isn't a 3 or 4 byte color")
def get_four_float_color(color: Color) -> Tuple[float, float, float, float]:
"""
Given a 3 or 4 RGB/RGBA color where each color goes 0-255, this
returns a RGBA tuple where each item is a scaled float from 0 to 1.
:param Color color: Three or four byte tuple
:return: Four floats as a RGBA tuple
"""
if len(color) == 4:
return color[0] / 255, color[1] / 255, color[2] / 255, color[3] / 255 # type: ignore
elif len(color) == 3:
return color[0] / 255, color[1] / 255, color[2] / 255, 1.0
else:
raise ValueError("This isn't a 3 or 4 byte color")
def make_transparent_color(color: Color, transparency: float):
"""
Given a RGB color, along with an alpha, returns a RGBA color tuple.
:param Color color: Three or four byte RGBA color
:param float transparency: Transparency
"""
return color[0], color[1], color[2], transparency
def rotate_point(x: float, y: float, cx: float, cy: float,
angle_degrees: float) -> List[float]:
"""
Rotate a point around a center.
:param x: x value of the point you want to rotate
:param y: y value of the point you want to rotate
:param cx: x value of the center point you want to rotate around
:param cy: y value of the center point you want to rotate around
:param angle_degrees: Angle, in degrees, to rotate
:return: Return rotated (x, y) pair
:rtype: (float, float)
"""
temp_x = x - cx
temp_y = y - cy
# now apply rotation
angle_radians = math.radians(angle_degrees)
cos_angle = math.cos(angle_radians)
sin_angle = math.sin(angle_radians)
rotated_x = temp_x * cos_angle - temp_y * sin_angle
rotated_y = temp_x * sin_angle + temp_y * cos_angle
# translate back
rounding_precision = 2
x = round(rotated_x + cx, rounding_precision)
y = round(rotated_y + cy, rounding_precision)
return [x, y]
def calculate_hit_box_points_simple(image):
"""
Given an image, this returns points that make up a hit box around it. Attempts
to trim out transparent pixels.
:param Image image:
:Returns: List of points
"""
left_border = 0
good = True
while good and left_border < image.width:
for row in range(image.height):
pos = (left_border, row)
pixel = image.getpixel(pos)
if type(pixel) is int or len(pixel) != 4:
raise TypeError("Error, calculate_points called on image not in RGBA format")
else:
if pixel[3] != 0:
good = False
break
if good:
left_border += 1
right_border = image.width - 1
good = True
while good and right_border > 0:
for row in range(image.height):
pos = (right_border, row)
pixel = image.getpixel(pos)
if pixel[3] != 0:
good = False
break
if good:
right_border -= 1
top_border = 0
good = True
while good and top_border < image.height:
for column in range(image.width):
pos = (column, top_border)
pixel = image.getpixel(pos)
if pixel[3] != 0:
good = False
break
if good:
top_border += 1
bottom_border = image.height - 1
good = True
while good and bottom_border > 0:
for column in range(image.width):
pos = (column, bottom_border)
pixel = image.getpixel(pos)
if pixel[3] != 0:
good = False
break
if good:
bottom_border -= 1
# If the image is empty, return an empty set
if bottom_border == 0:
return []
def _check_corner_offset(start_x, start_y, x_direction, y_direction):
bad = False
offset = 0
while not bad:
y = start_y + (offset * y_direction)
x = start_x
for count in range(offset + 1):
my_pixel = image.getpixel((x, y))
# print(f"({x}, {y}) = {pixel} | ", end="")
if my_pixel[3] != 0:
bad = True
break
y -= y_direction
x += x_direction
# print(f" - {bad}")
if not bad:
offset += 1
# print(f"offset: {offset}")
return offset
def _r(point, height, width):
return point[0] - width / 2, (height - point[1]) - height / 2
top_left_corner_offset = _check_corner_offset(left_border, top_border, 1, 1)
top_right_corner_offset = _check_corner_offset(right_border, top_border, -1, 1)
bottom_left_corner_offset = _check_corner_offset(left_border, bottom_border, 1, -1)
bottom_right_corner_offset = _check_corner_offset(right_border, bottom_border, -1, -1)
p1 = left_border + top_left_corner_offset, top_border
p2 = (right_border + 1) - top_right_corner_offset, top_border
p3 = (right_border + 1), top_border + top_right_corner_offset
p4 = (right_border + 1), (bottom_border + 1) - bottom_right_corner_offset
p5 = (right_border + 1) - bottom_right_corner_offset, (bottom_border + 1)
p6 = left_border + bottom_left_corner_offset, (bottom_border + 1)
p7 = left_border, (bottom_border + 1) - bottom_left_corner_offset
p8 = left_border, top_border + top_left_corner_offset
result = []
h = image.height
w = image.width
result.append(_r(p7, h, w))
if bottom_left_corner_offset:
result.append(_r(p6, h, w))
result.append(_r(p5, h, w))
if bottom_right_corner_offset:
result.append(_r(p4, h, w))
result.append(_r(p3, h, w))
if top_right_corner_offset:
result.append(_r(p2, h, w))
result.append(_r(p1, h, w))
if top_left_corner_offset:
result.append(_r(p8, h, w))
# Remove duplicates
result = tuple(dict.fromkeys(result))
return result
def calculate_hit_box_points_detailed(image: Image, hit_box_detail: float = 4.5):
"""
Given an image, this returns points that make up a hit box around it. Attempts
to trim out transparent pixels.
:param Image image: Image get hit box from.
:param int hit_box_detail: How detailed to make the hit box. There's a
trade-off in number of points vs. accuracy.
:Returns: List of points
"""
def sample_func(sample_point):
""" Method used to sample image. """
if sample_point[0] < 0 \
or sample_point[1] < 0 \
or sample_point[0] >= image.width \
or sample_point[1] >= image.height:
return 0
point_tuple = sample_point[0], sample_point[1]
color = image.getpixel(point_tuple)
if color[3] > 0:
return 255
else:
return 0
# | r2_y = start_y - normal_y * line_width / 2
r3_x = end_x + normal_x * line_width / 2
r3_y = end_y + normal_y * line_width / 2
r4_x = end_x - normal_x * line_width / 2
r4_y = end_y - normal_y * line_width / 2
| random_line_split | |
drawing_support.py | _y = end_y + normal_y * line_width / 2
r4_x = end_x - normal_x * line_width / 2
r4_y = end_y - normal_y * line_width / 2
points = (r1_x, r1_y), (r2_x, r2_y), (r4_x, r4_y), (r3_x, r3_y)
return points
def get_four_byte_color(color: Color) -> RGBA:
"""
Given a RGB list, it will return RGBA.
Given a RGBA list, it will return the same RGBA.
:param Color color: Three or four byte tuple
:returns: return: Four byte RGBA tuple
"""
if len(color) == 4:
return cast(RGBA, color)
elif len(color) == 3:
return color[0], color[1], color[2], 255
else:
raise ValueError("This isn't a 3 or 4 byte color")
def get_four_float_color(color: Color) -> Tuple[float, float, float, float]:
"""
Given a 3 or 4 RGB/RGBA color where each color goes 0-255, this
returns a RGBA tuple where each item is a scaled float from 0 to 1.
:param Color color: Three or four byte tuple
:return: Four floats as a RGBA tuple
"""
if len(color) == 4:
return color[0] / 255, color[1] / 255, color[2] / 255, color[3] / 255 # type: ignore
elif len(color) == 3:
return color[0] / 255, color[1] / 255, color[2] / 255, 1.0
else:
raise ValueError("This isn't a 3 or 4 byte color")
def make_transparent_color(color: Color, transparency: float):
|
def rotate_point(x: float, y: float, cx: float, cy: float,
angle_degrees: float) -> List[float]:
"""
Rotate a point around a center.
:param x: x value of the point you want to rotate
:param y: y value of the point you want to rotate
:param cx: x value of the center point you want to rotate around
:param cy: y value of the center point you want to rotate around
:param angle_degrees: Angle, in degrees, to rotate
:return: Return rotated (x, y) pair
:rtype: (float, float)
"""
temp_x = x - cx
temp_y = y - cy
# now apply rotation
angle_radians = math.radians(angle_degrees)
cos_angle = math.cos(angle_radians)
sin_angle = math.sin(angle_radians)
rotated_x = temp_x * cos_angle - temp_y * sin_angle
rotated_y = temp_x * sin_angle + temp_y * cos_angle
# translate back
rounding_precision = 2
x = round(rotated_x + cx, rounding_precision)
y = round(rotated_y + cy, rounding_precision)
return [x, y]
def calculate_hit_box_points_simple(image):
"""
Given an image, this returns points that make up a hit box around it. Attempts
to trim out transparent pixels.
:param Image image:
:Returns: List of points
"""
left_border = 0
good = True
while good and left_border < image.width:
for row in range(image.height):
pos = (left_border, row)
pixel = image.getpixel(pos)
if type(pixel) is int or len(pixel) != 4:
raise TypeError("Error, calculate_points called on image not in RGBA format")
else:
if pixel[3] != 0:
good = False
break
if good:
left_border += 1
right_border = image.width - 1
good = True
while good and right_border > 0:
for row in range(image.height):
pos = (right_border, row)
pixel = image.getpixel(pos)
if pixel[3] != 0:
good = False
break
if good:
right_border -= 1
top_border = 0
good = True
while good and top_border < image.height:
for column in range(image.width):
pos = (column, top_border)
pixel = image.getpixel(pos)
if pixel[3] != 0:
good = False
break
if good:
top_border += 1
bottom_border = image.height - 1
good = True
while good and bottom_border > 0:
for column in range(image.width):
pos = (column, bottom_border)
pixel = image.getpixel(pos)
if pixel[3] != 0:
good = False
break
if good:
bottom_border -= 1
# If the image is empty, return an empty set
if bottom_border == 0:
return []
def _check_corner_offset(start_x, start_y, x_direction, y_direction):
bad = False
offset = 0
while not bad:
y = start_y + (offset * y_direction)
x = start_x
for count in range(offset + 1):
my_pixel = image.getpixel((x, y))
# print(f"({x}, {y}) = {pixel} | ", end="")
if my_pixel[3] != 0:
bad = True
break
y -= y_direction
x += x_direction
# print(f" - {bad}")
if not bad:
offset += 1
# print(f"offset: {offset}")
return offset
def _r(point, height, width):
return point[0] - width / 2, (height - point[1]) - height / 2
top_left_corner_offset = _check_corner_offset(left_border, top_border, 1, 1)
top_right_corner_offset = _check_corner_offset(right_border, top_border, -1, 1)
bottom_left_corner_offset = _check_corner_offset(left_border, bottom_border, 1, -1)
bottom_right_corner_offset = _check_corner_offset(right_border, bottom_border, -1, -1)
p1 = left_border + top_left_corner_offset, top_border
p2 = (right_border + 1) - top_right_corner_offset, top_border
p3 = (right_border + 1), top_border + top_right_corner_offset
p4 = (right_border + 1), (bottom_border + 1) - bottom_right_corner_offset
p5 = (right_border + 1) - bottom_right_corner_offset, (bottom_border + 1)
p6 = left_border + bottom_left_corner_offset, (bottom_border + 1)
p7 = left_border, (bottom_border + 1) - bottom_left_corner_offset
p8 = left_border, top_border + top_left_corner_offset
result = []
h = image.height
w = image.width
result.append(_r(p7, h, w))
if bottom_left_corner_offset:
result.append(_r(p6, h, w))
result.append(_r(p5, h, w))
if bottom_right_corner_offset:
result.append(_r(p4, h, w))
result.append(_r(p3, h, w))
if top_right_corner_offset:
result.append(_r(p2, h, w))
result.append(_r(p1, h, w))
if top_left_corner_offset:
result.append(_r(p8, h, w))
# Remove duplicates
result = tuple(dict.fromkeys(result))
return result
def calculate_hit_box_points_detailed(image: Image, hit_box_detail: float = 4.5):
"""
Given an image, this returns points that make up a hit box around it. Attempts
to trim out transparent pixels.
:param Image image: Image get hit box from.
:param int hit_box_detail: How detailed to make the hit box. There's a
trade-off in number of points vs. accuracy.
:Returns: List of points
"""
def sample_func(sample_point):
""" Method used to sample image. """
if sample_point[0] < 0 \
or sample_point[1] < 0 \
or sample_point[0] >= image.width \
or sample_point[1] >= image.height:
return 0
point_tuple = sample_point[0], sample_point[1]
color = image.getpixel(point_tuple)
if color[3] > 0:
return 255
else:
return 0
# Do a quick check if it is a full tile
p1 = 0, 0
p2 = 0, image.height - 1
p3 | """
Given a RGB color, along with an alpha, returns a RGBA color tuple.
:param Color color: Three or four byte RGBA color
:param float transparency: Transparency
"""
return color[0], color[1], color[2], transparency | identifier_body |
drawing_support.py | = end_y + normal_y * line_width / 2
r4_x = end_x - normal_x * line_width / 2
r4_y = end_y - normal_y * line_width / 2
points = (r1_x, r1_y), (r2_x, r2_y), (r4_x, r4_y), (r3_x, r3_y)
return points
def get_four_byte_color(color: Color) -> RGBA:
"""
Given a RGB list, it will return RGBA.
Given a RGBA list, it will return the same RGBA.
:param Color color: Three or four byte tuple
:returns: return: Four byte RGBA tuple
"""
if len(color) == 4:
return cast(RGBA, color)
elif len(color) == 3:
return color[0], color[1], color[2], 255
else:
raise ValueError("This isn't a 3 or 4 byte color")
def get_four_float_color(color: Color) -> Tuple[float, float, float, float]:
"""
Given a 3 or 4 RGB/RGBA color where each color goes 0-255, this
returns a RGBA tuple where each item is a scaled float from 0 to 1.
:param Color color: Three or four byte tuple
:return: Four floats as a RGBA tuple
"""
if len(color) == 4:
return color[0] / 255, color[1] / 255, color[2] / 255, color[3] / 255 # type: ignore
elif len(color) == 3:
return color[0] / 255, color[1] / 255, color[2] / 255, 1.0
else:
raise ValueError("This isn't a 3 or 4 byte color")
def make_transparent_color(color: Color, transparency: float):
"""
Given a RGB color, along with an alpha, returns a RGBA color tuple.
:param Color color: Three or four byte RGBA color
:param float transparency: Transparency
"""
return color[0], color[1], color[2], transparency
def rotate_point(x: float, y: float, cx: float, cy: float,
angle_degrees: float) -> List[float]:
"""
Rotate a point around a center.
:param x: x value of the point you want to rotate
:param y: y value of the point you want to rotate
:param cx: x value of the center point you want to rotate around
:param cy: y value of the center point you want to rotate around
:param angle_degrees: Angle, in degrees, to rotate
:return: Return rotated (x, y) pair
:rtype: (float, float)
"""
temp_x = x - cx
temp_y = y - cy
# now apply rotation
angle_radians = math.radians(angle_degrees)
cos_angle = math.cos(angle_radians)
sin_angle = math.sin(angle_radians)
rotated_x = temp_x * cos_angle - temp_y * sin_angle
rotated_y = temp_x * sin_angle + temp_y * cos_angle
# translate back
rounding_precision = 2
x = round(rotated_x + cx, rounding_precision)
y = round(rotated_y + cy, rounding_precision)
return [x, y]
def calculate_hit_box_points_simple(image):
"""
Given an image, this returns points that make up a hit box around it. Attempts
to trim out transparent pixels.
:param Image image:
:Returns: List of points
"""
left_border = 0
good = True
while good and left_border < image.width:
for row in range(image.height):
pos = (left_border, row)
pixel = image.getpixel(pos)
if type(pixel) is int or len(pixel) != 4:
raise TypeError("Error, calculate_points called on image not in RGBA format")
else:
if pixel[3] != 0:
good = False
break
if good:
left_border += 1
right_border = image.width - 1
good = True
while good and right_border > 0:
for row in range(image.height):
pos = (right_border, row)
pixel = image.getpixel(pos)
if pixel[3] != 0:
good = False
break
if good:
right_border -= 1
top_border = 0
good = True
while good and top_border < image.height:
for column in range(image.width):
pos = (column, top_border)
pixel = image.getpixel(pos)
if pixel[3] != 0:
good = False
break
if good:
top_border += 1
bottom_border = image.height - 1
good = True
while good and bottom_border > 0:
for column in range(image.width):
pos = (column, bottom_border)
pixel = image.getpixel(pos)
if pixel[3] != 0:
good = False
break
if good:
bottom_border -= 1
# If the image is empty, return an empty set
if bottom_border == 0:
return []
def _check_corner_offset(start_x, start_y, x_direction, y_direction):
bad = False
offset = 0
while not bad:
y = start_y + (offset * y_direction)
x = start_x
for count in range(offset + 1):
my_pixel = image.getpixel((x, y))
# print(f"({x}, {y}) = {pixel} | ", end="")
if my_pixel[3] != 0:
bad = True
break
y -= y_direction
x += x_direction
# print(f" - {bad}")
if not bad:
offset += 1
# print(f"offset: {offset}")
return offset
def _r(point, height, width):
return point[0] - width / 2, (height - point[1]) - height / 2
top_left_corner_offset = _check_corner_offset(left_border, top_border, 1, 1)
top_right_corner_offset = _check_corner_offset(right_border, top_border, -1, 1)
bottom_left_corner_offset = _check_corner_offset(left_border, bottom_border, 1, -1)
bottom_right_corner_offset = _check_corner_offset(right_border, bottom_border, -1, -1)
p1 = left_border + top_left_corner_offset, top_border
p2 = (right_border + 1) - top_right_corner_offset, top_border
p3 = (right_border + 1), top_border + top_right_corner_offset
p4 = (right_border + 1), (bottom_border + 1) - bottom_right_corner_offset
p5 = (right_border + 1) - bottom_right_corner_offset, (bottom_border + 1)
p6 = left_border + bottom_left_corner_offset, (bottom_border + 1)
p7 = left_border, (bottom_border + 1) - bottom_left_corner_offset
p8 = left_border, top_border + top_left_corner_offset
result = []
h = image.height
w = image.width
result.append(_r(p7, h, w))
if bottom_left_corner_offset:
result.append(_r(p6, h, w))
result.append(_r(p5, h, w))
if bottom_right_corner_offset:
result.append(_r(p4, h, w))
result.append(_r(p3, h, w))
if top_right_corner_offset:
result.append(_r(p2, h, w))
result.append(_r(p1, h, w))
if top_left_corner_offset:
result.append(_r(p8, h, w))
# Remove duplicates
result = tuple(dict.fromkeys(result))
return result
def | (image: Image, hit_box_detail: float = 4.5):
"""
Given an image, this returns points that make up a hit box around it. Attempts
to trim out transparent pixels.
:param Image image: Image get hit box from.
:param int hit_box_detail: How detailed to make the hit box. There's a
trade-off in number of points vs. accuracy.
:Returns: List of points
"""
def sample_func(sample_point):
""" Method used to sample image. """
if sample_point[0] < 0 \
or sample_point[1] < 0 \
or sample_point[0] >= image.width \
or sample_point[1] >= image.height:
return 0
point_tuple = sample_point[0], sample_point[1]
color = image.getpixel(point_tuple)
if color[3] > 0:
return 255
else:
return 0
# Do a quick check if it is a full tile
p1 = 0, 0
p2 = 0, image.height - 1
p3 | calculate_hit_box_points_detailed | identifier_name |
groupspec.pb.go |
func init() {
proto.RegisterFile("akash/deployment/v1beta2/groupspec.proto", fileDescriptor_8afb9070f2e843b2)
}
var fileDescriptor_8afb9070f2e843b2 = []byte{
// 351 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xbf, 0x4e, 0xc3, 0x30,
0x10, 0xc6, 0x93, 0x16, 0x21, 0x9a, 0x32, 0xa0, 0x80, 0x44, 0xd4, 0x21, 0xae, 0x2c, 0x21, 0x82,
0x2a, 0x39, 0x22, 0x6c, 0x1d, 0xb3, 0xb0, 0x30, 0xa0, 0xb0, 0xb1, 0x39, 0xe1, 0x94, 0x56, 0x4d,
0xea, 0x60, 0x3b, 0x15, 0xe5, 0x09, 0x18, 0x79, 0x84, 0x6e, 0xbc, 0x4a, 0xc7, 0x8e, 0x4c, 0x11,
0x6a, 0x17, 0xd4, 0xb1, 0x4f, 0x80, 0xf2, 0x8f, 0xb6, 0x43, 0x37, 0xdf, 0xf9, 0x77, 0xf7, 0xdd,
0x7d, 0xa7, 0x59, 0x74, 0x44, 0xc5, 0xc0, 0x7e, 0x81, 0x24, 0x62, 0xd3, 0x18, 0xc6, 0xd2, 0x9e,
0xdc, 0xfa, 0x20, 0xa9, 0x63, 0x87, 0x9c, 0xa5, 0x89, 0x48, 0x20, 0x20, 0x09, 0x67, 0x92, 0xe9,
0x46, 0x41, 0x92, 0x2d, 0x49, 0x2a, 0xb2, 0x73, 0x11, 0xb2, 0x90, 0x15, 0x90, 0x9d, 0xbf, 0x4a,
0xbe, 0x83, 0xcb, 0xce, 0x3e, 0x15, 0xf0, 0xdf, 0x93, 0x4a, 0xc9, 0x87, 0x7e, 0x2a, 0xa1, 0x62,
0xae, 0x0f, 0xaa, 0x73, 0x10, 0x2c, 0xe5, 0x41, 0x05, 0xe2, 0xaf, 0x86, 0xd6, 0xba, 0xcf, 0x07,
0x7a, 0x4a, 0x20, 0xd0, 0x7b, 0xda, 0xd1, 0x98, 0xc6, 0x60, 0xa8, 0x5d, 0xd5, 0x6a, 0xb9, 0x97,
0xeb, 0x0c, 0x15, 0xf1, 0x26, 0x43, 0xed, 0x29, 0x8d, 0xa3, 0x3e, 0xce, 0x23, 0xec, 0x15, 0x49,
0xfd, 0x5d, 0x3b, 0xe5, 0xf0, 0x9a, 0x0e, 0x39, 0xe4, 0x02, 0xc2, 0x68, 0x74, 0x55, 0xab, 0xed,
0xdc, 0x90, 0x72, 0x9d, 0x7c, 0xbc, 0x7a, 0x11, 0xf2, 0x18, 0xd1, 0xa0, 0xa0, 0xbc, 0x9d, 0x02,
0xb7, 0x37, 0xcf, 0x90, 0xb2, 0xce, 0xd0, 0x5e, 0x9b, 0x4d, 0x86, 0xce, 0x4b, 0xad, 0xdd, 0x2c,
0xf6, 0xf6, 0x20, 0x3d, 0xd4, 0x5a, 0xf5, 0x22, 0xc2, 0x68, 0x76, 0x9b, 0x56, 0xdb, 0xc1, 0xe4,
0x90, 0x8f, 0xc4, 0xab, 0x50, 0xf7, 0xaa, 0x52, 0xdc, 0x16, 0x6f, 0x32, 0x74, 0x56, 0xcb, 0x55,
0x29, 0xec, 0x6d, 0xbf, 0xfb, 0x27, 0x1f, 0x33, 0xa4, 0xfc, 0xce, 0x90, 0xe2, 0x3e, 0xcc, 0x97,
0xa6, 0xba, 0x58, 0x9a, 0xea, 0xcf, 0xd2, 0x54, 0x3f, 0x57, 0xa6, 0xb2, 0x58, 0x99, 0xca, 0xf7,
0xca, 0x54, 0x9e, 0x9d, 0x70, 0x28, 0x07, 0xa9, 0x4f, 0x02, 0x16, 0xdb, 0x6c, 0xc2, 0x83, 0x68,
0x64, 0x97, 0xf6, 0xbf, 0xed, 0x1e, 0x40, 0x4e, 0x13, 0x10, 0xf5, 0x19, 0xfc, 0xe3, 0xc2, 0xfe,
0xbb, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x85, 0x76, 0x81, 0x09, 0x27, 0x02, 0x00, 0x00,
}
func (m *GroupSpec) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m | {
proto.RegisterType((*GroupSpec)(nil), "akash.deployment.v1beta2.GroupSpec")
} | identifier_body | |
groupspec.pb.go | 0xed, 0x29, 0x8d, 0xa3, 0x3e, 0xce, 0x23, 0xec, 0x15, 0x49,
0xfd, 0x5d, 0x3b, 0xe5, 0xf0, 0x9a, 0x0e, 0x39, 0xe4, 0x02, 0xc2, 0x68, 0x74, 0x55, 0xab, 0xed,
0xdc, 0x90, 0x72, 0x9d, 0x7c, 0xbc, 0x7a, 0x11, 0xf2, 0x18, 0xd1, 0xa0, 0xa0, 0xbc, 0x9d, 0x02,
0xb7, 0x37, 0xcf, 0x90, 0xb2, 0xce, 0xd0, 0x5e, 0x9b, 0x4d, 0x86, 0xce, 0x4b, 0xad, 0xdd, 0x2c,
0xf6, 0xf6, 0x20, 0x3d, 0xd4, 0x5a, 0xf5, 0x22, 0xc2, 0x68, 0x76, 0x9b, 0x56, 0xdb, 0xc1, 0xe4,
0x90, 0x8f, 0xc4, 0xab, 0x50, 0xf7, 0xaa, 0x52, 0xdc, 0x16, 0x6f, 0x32, 0x74, 0x56, 0xcb, 0x55,
0x29, 0xec, 0x6d, 0xbf, 0xfb, 0x27, 0x1f, 0x33, 0xa4, 0xfc, 0xce, 0x90, 0xe2, 0x3e, 0xcc, 0x97,
0xa6, 0xba, 0x58, 0x9a, 0xea, 0xcf, 0xd2, 0x54, 0x3f, 0x57, 0xa6, 0xb2, 0x58, 0x99, 0xca, 0xf7,
0xca, 0x54, 0x9e, 0x9d, 0x70, 0x28, 0x07, 0xa9, 0x4f, 0x02, 0x16, 0xdb, 0x6c, 0xc2, 0x83, 0x68,
0x64, 0x97, 0xf6, 0xbf, 0xed, 0x1e, 0x40, 0x4e, 0x13, 0x10, 0xf5, 0x19, 0xfc, 0xe3, 0xc2, 0xfe,
0xbb, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x85, 0x76, 0x81, 0x09, 0x27, 0x02, 0x00, 0x00,
}
func (m *GroupSpec) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GroupSpec) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *GroupSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Resources) > 0 {
for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGroupspec(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
}
}
{
size, err := m.Requirements.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGroupspec(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
if len(m.Name) > 0 {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintGroupspec(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func encodeVarintGroupspec(dAtA []byte, offset int, v uint64) int {
offset -= sovGroupspec(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *GroupSpec) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovGroupspec(uint64(l))
}
l = m.Requirements.Size()
n += 1 + l + sovGroupspec(uint64(l))
if len(m.Resources) > 0 {
for _, e := range m.Resources {
l = e.Size()
n += 1 + l + sovGroupspec(uint64(l))
}
}
return n
}
func sovGroupspec(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozGroupspec(x uint64) (n int) {
return sovGroupspec(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *GroupSpec) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGroupspec
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: GroupSpec: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: GroupSpec: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGroupspec
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGroupspec
}
postIndex := iNdEx + intStringLen
if postIndex < 0 { | return ErrInvalidLengthGroupspec
}
if postIndex > l {
return io.ErrUnexpectedEOF | random_line_split | |
groupspec.pb.go | 1, 0x94, 0x56, 0x4d,
0xea, 0x60, 0x3b, 0x15, 0xe5, 0x09, 0x18, 0x79, 0x84, 0x6e, 0xbc, 0x4a, 0xc7, 0x8e, 0x4c, 0x11,
0x6a, 0x17, 0xd4, 0xb1, 0x4f, 0x80, 0xf2, 0x8f, 0xb6, 0x43, 0x37, 0xdf, 0xf9, 0x77, 0xf7, 0xdd,
0x7d, 0xa7, 0x59, 0x74, 0x44, 0xc5, 0xc0, 0x7e, 0x81, 0x24, 0x62, 0xd3, 0x18, 0xc6, 0xd2, 0x9e,
0xdc, 0xfa, 0x20, 0xa9, 0x63, 0x87, 0x9c, 0xa5, 0x89, 0x48, 0x20, 0x20, 0x09, 0x67, 0x92, 0xe9,
0x46, 0x41, 0x92, 0x2d, 0x49, 0x2a, 0xb2, 0x73, 0x11, 0xb2, 0x90, 0x15, 0x90, 0x9d, 0xbf, 0x4a,
0xbe, 0x83, 0xcb, 0xce, 0x3e, 0x15, 0xf0, 0xdf, 0x93, 0x4a, 0xc9, 0x87, 0x7e, 0x2a, 0xa1, 0x62,
0xae, 0x0f, 0xaa, 0x73, 0x10, 0x2c, 0xe5, 0x41, 0x05, 0xe2, 0xaf, 0x86, 0xd6, 0xba, 0xcf, 0x07,
0x7a, 0x4a, 0x20, 0xd0, 0x7b, 0xda, 0xd1, 0x98, 0xc6, 0x60, 0xa8, 0x5d, 0xd5, 0x6a, 0xb9, 0x97,
0xeb, 0x0c, 0x15, 0xf1, 0x26, 0x43, 0xed, 0x29, 0x8d, 0xa3, 0x3e, 0xce, 0x23, 0xec, 0x15, 0x49,
0xfd, 0x5d, 0x3b, 0xe5, 0xf0, 0x9a, 0x0e, 0x39, 0xe4, 0x02, 0xc2, 0x68, 0x74, 0x55, 0xab, 0xed,
0xdc, 0x90, 0x72, 0x9d, 0x7c, 0xbc, 0x7a, 0x11, 0xf2, 0x18, 0xd1, 0xa0, 0xa0, 0xbc, 0x9d, 0x02,
0xb7, 0x37, 0xcf, 0x90, 0xb2, 0xce, 0xd0, 0x5e, 0x9b, 0x4d, 0x86, 0xce, 0x4b, 0xad, 0xdd, 0x2c,
0xf6, 0xf6, 0x20, 0x3d, 0xd4, 0x5a, 0xf5, 0x22, 0xc2, 0x68, 0x76, 0x9b, 0x56, 0xdb, 0xc1, 0xe4,
0x90, 0x8f, 0xc4, 0xab, 0x50, 0xf7, 0xaa, 0x52, 0xdc, 0x16, 0x6f, 0x32, 0x74, 0x56, 0xcb, 0x55,
0x29, 0xec, 0x6d, 0xbf, 0xfb, 0x27, 0x1f, 0x33, 0xa4, 0xfc, 0xce, 0x90, 0xe2, 0x3e, 0xcc, 0x97,
0xa6, 0xba, 0x58, 0x9a, 0xea, 0xcf, 0xd2, 0x54, 0x3f, 0x57, 0xa6, 0xb2, 0x58, 0x99, 0xca, 0xf7,
0xca, 0x54, 0x9e, 0x9d, 0x70, 0x28, 0x07, 0xa9, 0x4f, 0x02, 0x16, 0xdb, 0x6c, 0xc2, 0x83, 0x68,
0x64, 0x97, 0xf6, 0xbf, 0xed, 0x1e, 0x40, 0x4e, 0x13, 0x10, 0xf5, 0x19, 0xfc, 0xe3, 0xc2, 0xfe,
0xbb, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x85, 0x76, 0x81, 0x09, 0x27, 0x02, 0x00, 0x00,
}
func (m *GroupSpec) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GroupSpec) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *GroupSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Resources) > 0 {
for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGroupspec(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
}
}
{
size, err := m.Requirements.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGroupspec(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
if len(m.Name) > 0 | {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintGroupspec(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0xa
} | conditional_block | |
groupspec.pb.go | () {
proto.RegisterFile("akash/deployment/v1beta2/groupspec.proto", fileDescriptor_8afb9070f2e843b2)
}
var fileDescriptor_8afb9070f2e843b2 = []byte{
// 351 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xbf, 0x4e, 0xc3, 0x30,
0x10, 0xc6, 0x93, 0x16, 0x21, 0x9a, 0x32, 0xa0, 0x80, 0x44, 0xd4, 0x21, 0xae, 0x2c, 0x21, 0x82,
0x2a, 0x39, 0x22, 0x6c, 0x1d, 0xb3, 0xb0, 0x30, 0xa0, 0xb0, 0xb1, 0x39, 0xe1, 0x94, 0x56, 0x4d,
0xea, 0x60, 0x3b, 0x15, 0xe5, 0x09, 0x18, 0x79, 0x84, 0x6e, 0xbc, 0x4a, 0xc7, 0x8e, 0x4c, 0x11,
0x6a, 0x17, 0xd4, 0xb1, 0x4f, 0x80, 0xf2, 0x8f, 0xb6, 0x43, 0x37, 0xdf, 0xf9, 0x77, 0xf7, 0xdd,
0x7d, 0xa7, 0x59, 0x74, 0x44, 0xc5, 0xc0, 0x7e, 0x81, 0x24, 0x62, 0xd3, 0x18, 0xc6, 0xd2, 0x9e,
0xdc, 0xfa, 0x20, 0xa9, 0x63, 0x87, 0x9c, 0xa5, 0x89, 0x48, 0x20, 0x20, 0x09, 0x67, 0x92, 0xe9,
0x46, 0x41, 0x92, 0x2d, 0x49, 0x2a, 0xb2, 0x73, 0x11, 0xb2, 0x90, 0x15, 0x90, 0x9d, 0xbf, 0x4a,
0xbe, 0x83, 0xcb, 0xce, 0x3e, 0x15, 0xf0, 0xdf, 0x93, 0x4a, 0xc9, 0x87, 0x7e, 0x2a, 0xa1, 0x62,
0xae, 0x0f, 0xaa, 0x73, 0x10, 0x2c, 0xe5, 0x41, 0x05, 0xe2, 0xaf, 0x86, 0xd6, 0xba, 0xcf, 0x07,
0x7a, 0x4a, 0x20, 0xd0, 0x7b, 0xda, 0xd1, 0x98, 0xc6, 0x60, 0xa8, 0x5d, 0xd5, 0x6a, 0xb9, 0x97,
0xeb, 0x0c, 0x15, 0xf1, 0x26, 0x43, 0xed, 0x29, 0x8d, 0xa3, 0x3e, 0xce, 0x23, 0xec, 0x15, 0x49,
0xfd, 0x5d, 0x3b, 0xe5, 0xf0, 0x9a, 0x0e, 0x39, 0xe4, 0x02, 0xc2, 0x68, 0x74, 0x55, 0xab, 0xed,
0xdc, 0x90, 0x72, 0x9d, 0x7c, 0xbc, 0x7a, 0x11, 0xf2, 0x18, 0xd1, 0xa0, 0xa0, 0xbc, 0x9d, 0x02,
0xb7, 0x37, 0xcf, 0x90, 0xb2, 0xce, 0xd0, 0x5e, 0x9b, 0x4d, 0x86, 0xce, 0x4b, 0xad, 0xdd, 0x2c,
0xf6, 0xf6, 0x20, 0x3d, 0xd4, 0x5a, 0xf5, 0x22, 0xc2, 0x68, 0x76, 0x9b, 0x56, 0xdb, 0xc1, 0xe4,
0x90, 0x8f, 0xc4, 0xab, 0x50, 0xf7, 0xaa, 0x52, 0xdc, 0x16, 0x6f, 0x32, 0x74, 0x56, 0xcb, 0x55,
0x29, 0xec, 0x6d, 0xbf, 0xfb, 0x27, 0x1f, 0x33, 0xa4, 0xfc, 0xce, 0x90, 0xe2, 0x3e, 0xcc, 0x97,
0xa6, 0xba, 0x58, 0x9a, 0xea, 0xcf, 0xd2, 0x54, 0x3f, 0x57, 0xa6, 0xb2, 0x58, 0x99, 0xca, 0xf7,
0xca, 0x54, 0x9e, 0x9d, 0x70, 0x28, 0x07, 0xa9, 0x4f, 0x02, 0x16, 0xdb, 0x6c, 0xc2, 0x83, 0x68,
0x64, 0x97, 0xf6, 0xbf, 0xed, 0x1e, 0x40, 0x4e, 0x13, 0x10, 0xf5, 0x19, 0xfc, 0xe3, 0xc2, 0xfe,
0xbb, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x85, 0x76, 0x81, 0x09, 0x27, 0x02, 0x00, 0x00,
}
func (m *GroupSpec) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return d | init | identifier_name | |
main.go | Attributes.Defense
}
func (u Unit) Strength() int {
return u.Crunch().currentAttributes.Strength
}
func (u Unit) Accuracy() int {
return u.Crunch().currentAttributes.Accuracy
}
func (u Unit) Vitality() int {
return u.Crunch().currentAttributes.Vitality
}
func (u Unit) Willpower() int {
return u.Crunch().currentAttributes.Willpower
}
func (u Unit) Resistance() int {
return u.Crunch().currentAttributes.Resistance
}
type BaseStats struct {
MaxHp int
}
type Attributes struct {
Strength int
Defense int
Speed int
Accuracy int
Vitality int
Willpower int
Resistance int
}
type EffectType int
const (
PHYS EffectType = iota
MAG
SELF
)
type Attack struct {
Name string
FatigueCost int
PowerMod int
Accuracy int
Targets int
Stat string
EffType EffectType
Team int
}
type AttackMod struct {
PowerMod int
AccMod int
}
type AttackResult struct {
Damange int
Attr string
}
func Play() {
player := createPlayer()
for fight := 1; ; fight++ {
fmt.Println("starting fight", fight)
enemies := genEnemies(fight, 1)
combatants := enemies
combatants = append(combatants, player)
for round := 0; ; round++ {
fmt.Println("round", round)
combatants = playRound(combatants)
for _, c := range combatants {
if c.Name == player.Name {
player = c
break
}
}
if player.Hp <= 0 {
fmt.Println("you ded, try again")
os.Exit(0)
}
if checkOver(combatants) {
read("round over")
break
}
}
}
}
func playRound(combatants []Unit) []Unit {
orderedUnitNames := getPlayOrder(combatants)
for _, name := range orderedUnitNames {
if checkOver(combatants) {
return combatants
}
for _, c := range combatants {
if c.Name == name {
printUnit(c)
combatants = Turn(c, combatants)
break
}
}
}
return combatants
}
func checkOver(cbts []Unit) bool |
type order struct {
Name string
Speed int
}
func getPlayOrder(units []Unit) []string {
var orders []order
for _, u := range units {
orders = append(orders, order{u.Name, u.Speed()})
}
return handleOrders(orders)
}
func handleOrders(orders []order) []string {
ordered := []string{}
for {
if allAccounted(orders, ordered) {
break
}
next := getNext(orders)
ordered = append(ordered, next)
for i, order := range orders {
if order.Name == next {
order.Speed -= 5
orders[i] = order
}
}
}
return ordered
}
func allAccounted(orders []order, list []string) bool {
for _, o := range orders {
if !contains(o.Name, list) {
return false
}
}
return true
}
func getNext(orders []order) string {
sort.Slice(orders, func(i, j int) bool {
return orders[i].Speed > orders[j].Speed
})
return orders[0].Name
}
func printUnit(unit Unit) {
fmt.Printf("Name: %v. Hp: %v/%v. Lvl: %v. Team: %v. Fat: %v. CurAttrs[Str: %v, Def: %v, Spd: %v, Acc: %v, Vit: %v]\r\n", unit.Name, unit.Hp, unit.BaseStats.MaxHp,
unit.AiLevel, unit.Team,
unit.Fatigue, unit.Strength(), unit.Defense(), unit.Speed(), unit.Accuracy(), unit.Vitality())
}
func printAttack(atk Attack) {
fmt.Println("")
fmt.Printf("Chosen Attack: Name: %v. Stat: %v. Pow: %v. Acc: %v. NumTargets: %v. FatCost: %v.\r\n", atk.Name, atk.Stat, atk.PowerMod, atk.Accuracy, atk.Targets, atk.FatigueCost)
fmt.Println("")
}
func createPlayer() Unit {
name := read("name")
strong := readAttr("good")
weak := readAttr("bad")
attrs := Attributes{
5, 5, 5, 5, 5, 5, 5,
}
for _, attr := range validAttrs {
if attr == strong {
switch attr {
case "strength":
attrs.Strength += 5
case "defense":
attrs.Defense += 5
case "speed":
attrs.Speed += 5
case "accuracy":
attrs.Accuracy += 5
case "vitality":
attrs.Vitality += 5
}
}
if attr == weak {
switch attr {
case "strength":
attrs.Strength -= 5
case "defense":
attrs.Defense -= 5
case "speed":
attrs.Speed -= 5
case "accuracy":
attrs.Accuracy -= 5
case "vitality":
attrs.Vitality -= 5
}
}
}
player := CreateUnit(name, attrs)
player.Attacks = getAttacks()
player.IsHuman = true
return player
}
func CreateUnit(name string, attrs Attributes) Unit {
unit := Unit{
Name: name,
BaseAttributes: attrs,
Attacks: []Attack{getAttacks()[0]},
}
unit = unit.Crunch()
unit.Hp = unit.BaseStats.MaxHp
return unit
}
func readAttr(msg string) string {
raw := read(msg)
if !contains(raw, validAttrs) {
fmt.Println("invalid attr")
return readAttr(msg)
}
return raw
}
func contains(target string, strs []string) bool {
for _, s := range strs {
if s == target {
return true
}
}
return false
}
var abs = 0
func genEnemies(i int, team int) []Unit {
units := []Unit{}
count := 1
if i > 5 {
count++
}
if i > 10 {
count++
}
if i > 15 {
count++
}
for x := 0; x < count; x++ {
units = append(units, genUnit(x, i, team))
}
return units
}
var atk = 0
func genUnit(x int, i int, team int) Unit {
potentialAtks := getAttacks()
attrs := Attributes{offset(i), offset(i), offset(i), offset(i), offset(i), offset(i), offset(i)}
unit := CreateUnit(fmt.Sprintf("goblin%v-%v-%v", i, x, abs), attrs)
unit.Team = team
unit.AiLevel = randomInt(0, 10)
if i%3 == 0 && atk < len(potentialAtks) {
unit.Attacks = append(unit.Attacks, potentialAtks[atk])
atk++
}
abs++
return unit
}
func offset(i int) int {
return i + (int(i/2) * randomInt(-1, 1))
}
func Turn(active Unit, units []Unit) []Unit {
if active.Hp <= 0 {
fmt.Println(active.Name, "is dead")
return units
}
atk := PickAttack(active)
printAttack(atk)
var targets []Unit
if atk.EffType == SELF {
targets = []Unit{active}
} else {
targets = PickTargets(atk, active.AiLevel, active.Team, active.IsHuman, units)
}
hitMap := make(map[string][]AttackResult)
for _, target := range targets {
hitMap[target.Name] = append(hitMap[target.Name], resolveAttack(atk, target))
}
for i, unit := range units {
for name, results := range hitMap {
if name == unit.Name {
for _, result := range results {
switch result.Attr {
case "strength":
unit = unit.ModStrength(result.Damange)
case "defense":
unit = unit.ModDefense(result.Damange)
case "speed":
unit = unit.ModSpeed(result.Damange)
case "accuracy":
unit = unit.ModAccuracy(result.Damange)
case "fatigue":
unit.Fatigue += result.Damange
case "hp":
unit.Hp -= result.Damange
if unit.Hp > unit.BaseStats.MaxHp {
unit.Hp = unit.BaseStats.MaxHp
| {
teams := make(map[int]bool)
for _, cbt := range cbts {
if cbt.Hp > 0 {
teams[cbt.Team] = true
}
}
if len(teams) < 2 {
return true
}
return false
} | identifier_body |
main.go | (i int) Unit {
u.combatAttrMods.Accuracy += i
return u
}
func (u Unit) ModVitality(i int) Unit {
u.combatAttrMods.Vitality += i
return u
}
func (u Unit) ModSpeed(i int) Unit {
u.combatAttrMods.Speed += i
return u
}
func (u Unit) ModWillpower(i int) Unit {
u.combatAttrMods.Willpower += i
return u
}
func (u Unit) ModResistance(i int) Unit {
u.combatAttrMods.Resistance += i
return u
}
func (u Unit) Speed() int {
return u.Crunch().currentAttributes.Speed
}
func (u Unit) Defense() int {
return u.Crunch().currentAttributes.Defense
}
func (u Unit) Strength() int {
return u.Crunch().currentAttributes.Strength
}
func (u Unit) Accuracy() int {
return u.Crunch().currentAttributes.Accuracy
}
func (u Unit) Vitality() int {
return u.Crunch().currentAttributes.Vitality
}
func (u Unit) Willpower() int {
return u.Crunch().currentAttributes.Willpower
}
func (u Unit) Resistance() int {
return u.Crunch().currentAttributes.Resistance
}
type BaseStats struct {
MaxHp int
}
type Attributes struct {
Strength int
Defense int
Speed int
Accuracy int
Vitality int
Willpower int
Resistance int
}
type EffectType int
const (
PHYS EffectType = iota
MAG
SELF
)
type Attack struct {
Name string
FatigueCost int
PowerMod int
Accuracy int
Targets int
Stat string
EffType EffectType
Team int
}
type AttackMod struct {
PowerMod int
AccMod int
}
type AttackResult struct {
Damange int
Attr string
}
func Play() {
player := createPlayer()
for fight := 1; ; fight++ {
fmt.Println("starting fight", fight)
enemies := genEnemies(fight, 1)
combatants := enemies
combatants = append(combatants, player)
for round := 0; ; round++ {
fmt.Println("round", round)
combatants = playRound(combatants)
for _, c := range combatants {
if c.Name == player.Name {
player = c
break
}
}
if player.Hp <= 0 {
fmt.Println("you ded, try again")
os.Exit(0)
}
if checkOver(combatants) {
read("round over")
break
}
}
}
}
func playRound(combatants []Unit) []Unit {
orderedUnitNames := getPlayOrder(combatants)
for _, name := range orderedUnitNames {
if checkOver(combatants) {
return combatants
}
for _, c := range combatants {
if c.Name == name {
printUnit(c)
combatants = Turn(c, combatants)
break
}
}
}
return combatants
}
func checkOver(cbts []Unit) bool {
teams := make(map[int]bool)
for _, cbt := range cbts {
if cbt.Hp > 0 {
teams[cbt.Team] = true
}
}
if len(teams) < 2 {
return true
}
return false
}
type order struct {
Name string
Speed int
}
func getPlayOrder(units []Unit) []string {
var orders []order
for _, u := range units {
orders = append(orders, order{u.Name, u.Speed()})
}
return handleOrders(orders)
}
func handleOrders(orders []order) []string {
ordered := []string{}
for {
if allAccounted(orders, ordered) {
break
}
next := getNext(orders)
ordered = append(ordered, next)
for i, order := range orders {
if order.Name == next {
order.Speed -= 5
orders[i] = order
}
}
}
return ordered
}
func allAccounted(orders []order, list []string) bool {
for _, o := range orders {
if !contains(o.Name, list) {
return false
}
}
return true
}
func getNext(orders []order) string {
sort.Slice(orders, func(i, j int) bool {
return orders[i].Speed > orders[j].Speed
})
return orders[0].Name
}
func printUnit(unit Unit) {
fmt.Printf("Name: %v. Hp: %v/%v. Lvl: %v. Team: %v. Fat: %v. CurAttrs[Str: %v, Def: %v, Spd: %v, Acc: %v, Vit: %v]\r\n", unit.Name, unit.Hp, unit.BaseStats.MaxHp,
unit.AiLevel, unit.Team,
unit.Fatigue, unit.Strength(), unit.Defense(), unit.Speed(), unit.Accuracy(), unit.Vitality())
}
func printAttack(atk Attack) {
fmt.Println("")
fmt.Printf("Chosen Attack: Name: %v. Stat: %v. Pow: %v. Acc: %v. NumTargets: %v. FatCost: %v.\r\n", atk.Name, atk.Stat, atk.PowerMod, atk.Accuracy, atk.Targets, atk.FatigueCost)
fmt.Println("")
}
func createPlayer() Unit {
name := read("name")
strong := readAttr("good")
weak := readAttr("bad")
attrs := Attributes{
5, 5, 5, 5, 5, 5, 5,
}
for _, attr := range validAttrs {
if attr == strong {
switch attr {
case "strength":
attrs.Strength += 5
case "defense":
attrs.Defense += 5
case "speed":
attrs.Speed += 5
case "accuracy":
attrs.Accuracy += 5
case "vitality":
attrs.Vitality += 5
}
}
if attr == weak {
switch attr {
case "strength":
attrs.Strength -= 5
case "defense":
attrs.Defense -= 5
case "speed":
attrs.Speed -= 5
case "accuracy":
attrs.Accuracy -= 5
case "vitality":
attrs.Vitality -= 5
}
}
}
player := CreateUnit(name, attrs)
player.Attacks = getAttacks()
player.IsHuman = true
return player
}
func CreateUnit(name string, attrs Attributes) Unit {
unit := Unit{
Name: name,
BaseAttributes: attrs,
Attacks: []Attack{getAttacks()[0]},
}
unit = unit.Crunch()
unit.Hp = unit.BaseStats.MaxHp
return unit
}
func readAttr(msg string) string {
raw := read(msg)
if !contains(raw, validAttrs) {
fmt.Println("invalid attr")
return readAttr(msg)
}
return raw
}
func contains(target string, strs []string) bool {
for _, s := range strs {
if s == target {
return true
}
}
return false
}
var abs = 0
func genEnemies(i int, team int) []Unit {
units := []Unit{}
count := 1
if i > 5 {
count++
}
if i > 10 {
count++
}
if i > 15 {
count++
}
for x := 0; x < count; x++ {
units = append(units, genUnit(x, i, team))
}
return units
}
var atk = 0
func genUnit(x int, i int, team int) Unit {
potentialAtks := getAttacks()
attrs := Attributes{offset(i), offset(i), offset(i), offset(i), offset(i), offset(i), offset(i)}
unit := CreateUnit(fmt.Sprintf("goblin%v-%v-%v", i, x, abs), attrs)
unit.Team = team
unit.AiLevel = randomInt(0, 10)
if i%3 == 0 && atk < len(potentialAtks) {
unit.Attacks = append(unit.Attacks, potentialAtks[atk])
atk++
}
abs++
return unit
}
func offset(i int) int {
return i + (int(i/2) * randomInt(-1, 1))
}
func Turn(active Unit, units []Unit) []Unit {
if active.Hp <= 0 {
fmt.Println(active.Name, "is dead")
return units
}
atk := PickAttack(active)
printAttack(atk)
var targets []Unit
if atk.EffType == SELF {
targets = []Unit{active}
} else {
targets = PickTargets(atk, active.AiLevel, active.Team, active.IsHuman, units)
}
hitMap := make(map[string][]AttackResult)
for _, target := range targets {
hitMap[target.Name] = append(hitMap[target.Name], resolveAttack(atk | ModAccuracy | identifier_name | |
main.go | runch().currentAttributes.Strength
}
func (u Unit) Accuracy() int {
return u.Crunch().currentAttributes.Accuracy
}
func (u Unit) Vitality() int {
return u.Crunch().currentAttributes.Vitality
}
func (u Unit) Willpower() int {
return u.Crunch().currentAttributes.Willpower
}
func (u Unit) Resistance() int {
return u.Crunch().currentAttributes.Resistance
}
type BaseStats struct {
MaxHp int
}
type Attributes struct {
Strength int
Defense int
Speed int
Accuracy int
Vitality int
Willpower int
Resistance int
}
type EffectType int
const (
PHYS EffectType = iota
MAG
SELF
)
type Attack struct {
Name string
FatigueCost int
PowerMod int
Accuracy int
Targets int
Stat string
EffType EffectType
Team int
}
type AttackMod struct {
PowerMod int
AccMod int
}
type AttackResult struct {
Damange int
Attr string
}
func Play() {
player := createPlayer()
for fight := 1; ; fight++ {
fmt.Println("starting fight", fight)
enemies := genEnemies(fight, 1)
combatants := enemies
combatants = append(combatants, player)
for round := 0; ; round++ {
fmt.Println("round", round)
combatants = playRound(combatants)
for _, c := range combatants {
if c.Name == player.Name {
player = c
break
}
}
if player.Hp <= 0 {
fmt.Println("you ded, try again")
os.Exit(0)
}
if checkOver(combatants) {
read("round over")
break
}
}
}
}
func playRound(combatants []Unit) []Unit {
orderedUnitNames := getPlayOrder(combatants)
for _, name := range orderedUnitNames {
if checkOver(combatants) {
return combatants
}
for _, c := range combatants {
if c.Name == name {
printUnit(c)
combatants = Turn(c, combatants)
break
}
}
}
return combatants
}
func checkOver(cbts []Unit) bool {
teams := make(map[int]bool)
for _, cbt := range cbts {
if cbt.Hp > 0 {
teams[cbt.Team] = true
}
}
if len(teams) < 2 {
return true
}
return false
}
type order struct {
Name string
Speed int
}
func getPlayOrder(units []Unit) []string {
var orders []order
for _, u := range units {
orders = append(orders, order{u.Name, u.Speed()})
}
return handleOrders(orders)
}
func handleOrders(orders []order) []string {
ordered := []string{}
for {
if allAccounted(orders, ordered) {
break
}
next := getNext(orders)
ordered = append(ordered, next)
for i, order := range orders {
if order.Name == next {
order.Speed -= 5
orders[i] = order
}
}
}
return ordered
}
func allAccounted(orders []order, list []string) bool {
for _, o := range orders {
if !contains(o.Name, list) {
return false
}
}
return true
}
func getNext(orders []order) string {
sort.Slice(orders, func(i, j int) bool {
return orders[i].Speed > orders[j].Speed
})
return orders[0].Name
}
func printUnit(unit Unit) {
fmt.Printf("Name: %v. Hp: %v/%v. Lvl: %v. Team: %v. Fat: %v. CurAttrs[Str: %v, Def: %v, Spd: %v, Acc: %v, Vit: %v]\r\n", unit.Name, unit.Hp, unit.BaseStats.MaxHp,
unit.AiLevel, unit.Team,
unit.Fatigue, unit.Strength(), unit.Defense(), unit.Speed(), unit.Accuracy(), unit.Vitality())
}
func printAttack(atk Attack) {
fmt.Println("")
fmt.Printf("Chosen Attack: Name: %v. Stat: %v. Pow: %v. Acc: %v. NumTargets: %v. FatCost: %v.\r\n", atk.Name, atk.Stat, atk.PowerMod, atk.Accuracy, atk.Targets, atk.FatigueCost)
fmt.Println("")
}
func createPlayer() Unit {
name := read("name")
strong := readAttr("good")
weak := readAttr("bad")
attrs := Attributes{
5, 5, 5, 5, 5, 5, 5,
}
for _, attr := range validAttrs {
if attr == strong {
switch attr {
case "strength":
attrs.Strength += 5
case "defense":
attrs.Defense += 5
case "speed":
attrs.Speed += 5
case "accuracy":
attrs.Accuracy += 5
case "vitality":
attrs.Vitality += 5
}
}
if attr == weak {
switch attr {
case "strength":
attrs.Strength -= 5
case "defense":
attrs.Defense -= 5
case "speed":
attrs.Speed -= 5
case "accuracy":
attrs.Accuracy -= 5
case "vitality":
attrs.Vitality -= 5
}
}
}
player := CreateUnit(name, attrs)
player.Attacks = getAttacks()
player.IsHuman = true
return player
}
func CreateUnit(name string, attrs Attributes) Unit {
unit := Unit{
Name: name,
BaseAttributes: attrs,
Attacks: []Attack{getAttacks()[0]},
}
unit = unit.Crunch()
unit.Hp = unit.BaseStats.MaxHp
return unit
}
func readAttr(msg string) string {
raw := read(msg)
if !contains(raw, validAttrs) {
fmt.Println("invalid attr")
return readAttr(msg)
}
return raw
}
func contains(target string, strs []string) bool {
for _, s := range strs {
if s == target {
return true
}
}
return false
}
var abs = 0
func genEnemies(i int, team int) []Unit {
units := []Unit{}
count := 1
if i > 5 {
count++
}
if i > 10 {
count++
}
if i > 15 {
count++
}
for x := 0; x < count; x++ {
units = append(units, genUnit(x, i, team))
}
return units
}
var atk = 0
func genUnit(x int, i int, team int) Unit {
potentialAtks := getAttacks()
attrs := Attributes{offset(i), offset(i), offset(i), offset(i), offset(i), offset(i), offset(i)}
unit := CreateUnit(fmt.Sprintf("goblin%v-%v-%v", i, x, abs), attrs)
unit.Team = team
unit.AiLevel = randomInt(0, 10)
if i%3 == 0 && atk < len(potentialAtks) {
unit.Attacks = append(unit.Attacks, potentialAtks[atk])
atk++
}
abs++
return unit
}
func offset(i int) int {
return i + (int(i/2) * randomInt(-1, 1))
}
func Turn(active Unit, units []Unit) []Unit {
if active.Hp <= 0 {
fmt.Println(active.Name, "is dead")
return units
}
atk := PickAttack(active)
printAttack(atk)
var targets []Unit
if atk.EffType == SELF {
targets = []Unit{active}
} else {
targets = PickTargets(atk, active.AiLevel, active.Team, active.IsHuman, units)
}
hitMap := make(map[string][]AttackResult)
for _, target := range targets {
hitMap[target.Name] = append(hitMap[target.Name], resolveAttack(atk, target))
}
for i, unit := range units {
for name, results := range hitMap {
if name == unit.Name {
for _, result := range results | {
switch result.Attr {
case "strength":
unit = unit.ModStrength(result.Damange)
case "defense":
unit = unit.ModDefense(result.Damange)
case "speed":
unit = unit.ModSpeed(result.Damange)
case "accuracy":
unit = unit.ModAccuracy(result.Damange)
case "fatigue":
unit.Fatigue += result.Damange
case "hp":
unit.Hp -= result.Damange
if unit.Hp > unit.BaseStats.MaxHp {
unit.Hp = unit.BaseStats.MaxHp
}
case "select":
var attr string
if active.IsHuman { | conditional_block | |
main.go |
return player
}
func CreateUnit(name string, attrs Attributes) Unit {
unit := Unit{
Name: name,
BaseAttributes: attrs,
Attacks: []Attack{getAttacks()[0]},
}
unit = unit.Crunch()
unit.Hp = unit.BaseStats.MaxHp
return unit
}
func readAttr(msg string) string {
raw := read(msg)
if !contains(raw, validAttrs) {
fmt.Println("invalid attr")
return readAttr(msg)
}
return raw
}
func contains(target string, strs []string) bool {
for _, s := range strs {
if s == target {
return true
}
}
return false
}
var abs = 0
func genEnemies(i int, team int) []Unit {
units := []Unit{}
count := 1
if i > 5 {
count++
}
if i > 10 {
count++
}
if i > 15 {
count++
}
for x := 0; x < count; x++ {
units = append(units, genUnit(x, i, team))
}
return units
}
var atk = 0
func genUnit(x int, i int, team int) Unit {
potentialAtks := getAttacks()
attrs := Attributes{offset(i), offset(i), offset(i), offset(i), offset(i), offset(i), offset(i)}
unit := CreateUnit(fmt.Sprintf("goblin%v-%v-%v", i, x, abs), attrs)
unit.Team = team
unit.AiLevel = randomInt(0, 10)
if i%3 == 0 && atk < len(potentialAtks) {
unit.Attacks = append(unit.Attacks, potentialAtks[atk])
atk++
}
abs++
return unit
}
func offset(i int) int {
return i + (int(i/2) * randomInt(-1, 1))
}
func Turn(active Unit, units []Unit) []Unit {
if active.Hp <= 0 {
fmt.Println(active.Name, "is dead")
return units
}
atk := PickAttack(active)
printAttack(atk)
var targets []Unit
if atk.EffType == SELF {
targets = []Unit{active}
} else {
targets = PickTargets(atk, active.AiLevel, active.Team, active.IsHuman, units)
}
hitMap := make(map[string][]AttackResult)
for _, target := range targets {
hitMap[target.Name] = append(hitMap[target.Name], resolveAttack(atk, target))
}
for i, unit := range units {
for name, results := range hitMap {
if name == unit.Name {
for _, result := range results {
switch result.Attr {
case "strength":
unit = unit.ModStrength(result.Damange)
case "defense":
unit = unit.ModDefense(result.Damange)
case "speed":
unit = unit.ModSpeed(result.Damange)
case "accuracy":
unit = unit.ModAccuracy(result.Damange)
case "fatigue":
unit.Fatigue += result.Damange
case "hp":
unit.Hp -= result.Damange
if unit.Hp > unit.BaseStats.MaxHp {
unit.Hp = unit.BaseStats.MaxHp
}
case "select":
var attr string
if active.IsHuman {
attr = readAttr("pick stat")
} else {
attr = validAttrs[randomInt(0, len(validAttrs)-1)]
}
result.Attr = attr
switch attr {
case "strength":
unit = unit.ModStrength(result.Damange)
case "defense":
unit = unit.ModDefense(result.Damange)
case "speed":
unit = unit.ModSpeed(result.Damange)
case "accuracy":
unit = unit.ModAccuracy(result.Damange)
case "vitality":
unit = unit.ModVitality(result.Damange)
}
}
if result.Attr == "miss" {
fmt.Printf("%v attack %v missed %v\r\n", active.Name, atk.Name, unit.Name)
} else {
fmt.Printf("%v attack %v hit %v and dealt %v to %v\r\n", active.Name, atk.Name, unit.Name, result.Damange, result.Attr)
if unit.Hp <= 0 {
fmt.Println(active.Name, "killed", unit.Name)
}
}
}
}
}
units[i] = unit
}
activeIndex := -1
for i, unit := range units {
if unit.Name == active.Name {
active = unit
activeIndex = i
break
}
}
active.Fatigue += atk.FatigueCost
active = active.Crunch()
units[activeIndex] = active
return units
}
func resolveAttack(attack Attack, unit Unit) AttackResult {
//TODO: support magic attacks
if attack.Team == unit.Team {
return AttackResult{
Attr: attack.Stat,
Damange: attack.PowerMod,
}
}
if attack.EffType == PHYS {
if attack.Accuracy < unit.Speed() {
return AttackResult{
Attr: "miss",
}
}
dmg := attack.PowerMod - unit.Defense()
if dmg < 0 {
dmg = 0
}
return AttackResult{
Attr: attack.Stat,
Damange: dmg,
}
}
if attack.Accuracy < unit.Willpower() {
return AttackResult{
Attr: "miss",
}
}
dmg := attack.PowerMod - unit.Resistance()
if dmg < 0 {
dmg = 0
}
return AttackResult{
Attr: attack.Stat,
Damange: dmg,
}
}
func PickTargets(atk Attack, lvl int, team int, human bool, units []Unit) []Unit {
if human {
fmt.Println("")
for _, unit := range units {
printUnit(unit)
}
fmt.Println("")
return pickPlayerTargets(atk.Targets, units)
}
return npcPickTargets(atk, lvl, team, units)
}
func PickAttack(unit Unit) Attack {
var atk Attack
if unit.IsHuman {
atk = pickPlayerAttack(unit.Attacks)
} else {
atk = npcPickAttack(unit)
}
atk.Team = unit.Team
//TODO: phys vs mag
if atk.EffType == PHYS {
atk.PowerMod += unit.Strength()
atk.Accuracy += unit.Accuracy()
} else {
atk.PowerMod += unit.Willpower()
atk.Accuracy += int((unit.Willpower() + unit.Resistance()) / 2)
}
return atk
}
func pickPlayerTargets(num int, units []Unit) []Unit {
var targets []Unit
for i := 0; i < num; i++ {
fmt.Println("Target", i)
targets = append(targets, selectPlayerTarget(units))
}
return targets
}
func npcPickAttack(unit Unit) Attack {
if unit.AiLevel < 3 {
return unit.Attacks[randomInt(0, len(unit.Attacks)-1)]
}
if unit.Fatigue > 4 {
for _, atk := range unit.Attacks {
if atk.Name == "rest" {
return atk
}
}
}
var basic Attack
for _, atk := range unit.Attacks {
if atk.Name == "big" && unit.Strength() < unit.Accuracy() {
return atk
}
if atk.Name == "small" && unit.Accuracy() < unit.Strength() {
return atk
}
if atk.Name == "basic" {
basic = atk
}
}
return basic
}
func npcPickTargets(atk Attack, lvl int, team int, units []Unit) []Unit {
targets := []Unit{}
for i := 0; i < atk.Targets; i++ {
if lvl < 2 {
randUnit := randomInt(0, len(units)-1)
targets = append(targets, units[randUnit])
} else {
targets = append(targets, findFirstValidTarget(atk.PowerMod, team, units))
}
}
return targets
}
func findFirstValidTarget(power int, team int, units []Unit) Unit {
for _, unit := range units {
if power > 0 {
if unit.Team != team {
return unit
}
} else if unit.Team == team {
return unit
}
}
if len(units) < 1 {
return Unit{}
}
return units[0]
}
func selectPlayerTarget(units []Unit) Unit {
chosen := read("pick target")
target := Unit{}
for _, unit := range units {
if unit.Name == chosen {
target = unit
}
}
if target.Name == "" {
fmt.Println("invalid target")
return selectPlayerTarget(units)
}
return target
}
func pickPlayerAttack(atks []Attack) Attack {
atkStr := read("choose attack")
var atk Attack
for _, ak := range atks {
if ak.Name == atkStr {
atk = ak | }
} | random_line_split | |
execution.rs | /// # Note
///
/// - This is the key where storage allocation, pushing and pulling is rooted
/// using the `SpreadLayout` and `SpreadAllocate` traits primarily.
/// - This trait is automatically implemented by the ink! codegen.
/// - The existence of this trait allows to customize the root key in future
/// versions of ink! if needed.
pub trait ContractRootKey {
const ROOT_KEY: Key;
}
/// Returns `Ok` if the caller did not transfer additional value to the callee.
///
/// # Errors
///
/// If the caller did send some amount of transferred value to the callee.
#[inline]
pub fn deny_payment<E>() -> Result<(), DispatchError>
where
E: Environment,
{
let transferred = ink_env::transferred_balance::<E>();
if transferred != <E as Environment>::Balance::from(0_u32) {
return Err(DispatchError::PaidUnpayableMessage)
}
Ok(())
}
/// Configuration for execution of ink! constructor.
#[derive(Debug, Copy, Clone)]
pub struct ExecuteConstructorConfig {
/// Yields `true` if the dynamic storage allocator has been enabled.
///
/// # Note
///
/// Authors can enable it via `#[ink::contract(dynamic_storage_allocator = true)]`.
pub dynamic_storage_alloc: bool,
}
/// Executes the given ink! constructor.
///
/// # Note
///
/// The closure is supposed to already contain all the arguments that the real
/// constructor message requires and forwards them.
#[inline]
pub fn execute_constructor<Contract, F, R>(
config: ExecuteConstructorConfig,
f: F,
) -> Result<(), DispatchError>
where
Contract: SpreadLayout + ContractRootKey,
F: FnOnce() -> R,
<private::Seal<R> as ConstructorReturnType<Contract>>::ReturnValue: scale::Encode,
private::Seal<R>: ConstructorReturnType<Contract>,
{
if config.dynamic_storage_alloc {
alloc::initialize(ContractPhase::Deploy);
}
let result = ManuallyDrop::new(private::Seal(f()));
match result.as_result() {
Ok(contract) => {
// Constructor is infallible or is fallible but succeeded.
//
// This requires us to sync back the changes of the contract storage.
let root_key = <Contract as ContractRootKey>::ROOT_KEY;
push_spread_root::<Contract>(contract, &root_key);
if config.dynamic_storage_alloc {
alloc::finalize();
}
Ok(())
}
Err(_) => {
// Constructor is fallible and failed.
//
// We need to revert the state of the transaction.
ink_env::return_value::<
<private::Seal<R> as ConstructorReturnType<Contract>>::ReturnValue,
>(
ReturnFlags::default().set_reverted(true),
result.return_value(),
)
}
}
}
/// Initializes the ink! contract using the given initialization routine.
///
/// # Note
///
/// - This uses `SpreadAllocate` trait in order to default initialize the
/// ink! smart contract before calling the initialization routine.
/// - This either returns `Contract` or `Result<Contract, E>` depending
/// on the return type `R` of the initializer closure `F`.
/// If `R` is `()` then `Contract` is returned and if `R` is any type of
/// `Result<(), E>` then `Result<Contract, E>` is returned.
/// Other return types for `F` than the ones listed above are not allowed.
#[inline]
pub fn initialize_contract<Contract, F, R>(
initializer: F,
) -> <R as InitializerReturnType<Contract>>::Wrapped
where
Contract: ContractRootKey + SpreadAllocate,
F: FnOnce(&mut Contract) -> R,
R: InitializerReturnType<Contract>,
{
let mut key_ptr = KeyPtr::from(<Contract as ContractRootKey>::ROOT_KEY);
let mut instance = <Contract as SpreadAllocate>::allocate_spread(&mut key_ptr);
let result = initializer(&mut instance);
result.into_wrapped(instance)
}
mod private {
/// Seals the implementation of `ContractInitializerReturnType`.
pub trait Sealed {}
impl Sealed for () {}
impl<T, E> Sealed for Result<T, E> {}
/// A thin-wrapper type that automatically seals its inner type.
///
/// Since it is private it can only be used from within this crate.
/// We need this type in order to properly seal the `ConstructorReturnType`
/// trait from unwanted external trait implementations.
#[repr(transparent)]
pub struct Seal<T>(pub T);
impl<T> Sealed for Seal<T> {}
}
/// Guards against using invalid contract initializer types.
///
/// # Note
///
/// Currently the only allowed types are `()` and `Result<(), E>`
/// where `E` is some unspecified error type.
/// If the contract initializer returns `Result::Err` the utility
/// method that is used to initialize an ink! smart contract will
/// revert the state of the contract instantiation.
pub trait ConstructorReturnType<C>: private::Sealed {
/// Is `true` if `Self` is `Result<C, E>`.
const IS_RESULT: bool = false;
/// The error type of the constructor return type.
///
/// # Note
///
/// For infallible constructors this is `core::convert::Infallible`.
type Error;
/// The type of the return value of the constructor.
///
/// # Note
///
/// For infallible constructors this is `()` whereas for fallible
/// constructors this is the actual return value. Since we only ever
/// return a value in case of `Result::Err` the `Result::Ok` value
/// does not matter.
type ReturnValue;
/// Converts the return value into a `Result` instance.
///
/// # Note
///
/// For infallible constructor returns this always yields `Ok`.
fn as_result(&self) -> Result<&C, &Self::Error>;
/// Returns the actual return value of the constructor.
///
/// # Note
///
/// For infallible constructor returns this always yields `()`
/// and is basically ignored since this does not get called
/// if the constructor did not fail.
fn return_value(&self) -> &Self::ReturnValue;
}
impl<C> ConstructorReturnType<C> for private::Seal<C> {
type Error = Infallible;
type ReturnValue = ();
#[inline]
fn as_result(&self) -> Result<&C, &Self::Error> {
Ok(&self.0)
}
#[inline]
fn return_value(&self) -> &Self::ReturnValue {
&()
}
}
impl<C, E> ConstructorReturnType<C> for private::Seal<Result<C, E>> {
const IS_RESULT: bool = true;
type Error = E;
type ReturnValue = Result<C, E>;
#[inline]
fn as_result(&self) -> Result<&C, &Self::Error> {
self.0.as_ref()
}
#[inline]
fn return_value(&self) -> &Self::ReturnValue |
}
/// Trait used to convert return types of contract initializer routines.
///
/// Only `()` and `Result<(), E>` are allowed contract initializer return types.
/// For `WrapReturnType<C>` where `C` is the contract type the trait converts
/// `()` into `C` and `Result<(), E>` into `Result<C, E>`.
pub trait InitializerReturnType<C>: private::Sealed {
type Wrapped;
/// Performs the type conversion of the initialization routine return type.
fn into_wrapped(self, wrapped: C) -> Self::Wrapped;
}
impl<C> InitializerReturnType<C> for () {
type Wrapped = C;
#[inline]
fn into_wrapped(self, wrapped: C) -> C {
wrapped
}
}
impl<C, E> InitializerReturnType<C> for Result<(), E> {
type Wrapped = Result<C, E>;
#[inline]
fn into_wrapped(self, wrapped: C) -> Self::Wrapped {
self.map(|_| wrapped)
}
}
/// Configuration for execution of ink! messages.
#[derive(Debug, Copy, Clone)]
pub struct ExecuteMessageConfig {
/// Yields `true` if the ink! message accepts payment.
///
/// # Note
///
/// If no ink! message within the same ink! smart contract
/// is payable then this flag will be `true` since the check
/// then is moved before the message dispatch as an optimization.
pub payable: bool,
/// Yields `true` if the ink! message might mutate contract storage.
///
/// # Note
///
/// This is usually true for `&mut self` ink! messages.
pub mutates: bool,
/// Yields `true` if the dynamic storage allocator has been enabled.
///
/// # Note
///
/// Authors can enable it via `#[ink::contract(dynamic_storage_allocator = true)]`.
pub dynamic_storage_alloc: bool,
}
/// Initiates an ink! message call with the given configuration.
///
/// Returns the contract state pulled from the root storage region upon success.
///
/// # Note
///
/// This work around that splits executing an ink! message into initiate
/// and finalize phases was needed due to the fact that `is_result_type`
/// and `is_result_err` macros do not work in generic contexts.
#[inline]
pub fn initiate_message<Contract>(
config: Execute | {
&self.0
} | identifier_body |
execution.rs | allible or is fallible but succeeded.
//
// This requires us to sync back the changes of the contract storage.
let root_key = <Contract as ContractRootKey>::ROOT_KEY;
push_spread_root::<Contract>(contract, &root_key);
if config.dynamic_storage_alloc {
alloc::finalize();
}
Ok(())
}
Err(_) => {
// Constructor is fallible and failed.
//
// We need to revert the state of the transaction.
ink_env::return_value::<
<private::Seal<R> as ConstructorReturnType<Contract>>::ReturnValue,
>(
ReturnFlags::default().set_reverted(true),
result.return_value(),
)
}
}
}
/// Initializes the ink! contract using the given initialization routine.
///
/// # Note
///
/// - This uses `SpreadAllocate` trait in order to default initialize the
/// ink! smart contract before calling the initialization routine.
/// - This either returns `Contract` or `Result<Contract, E>` depending
/// on the return type `R` of the initializer closure `F`.
/// If `R` is `()` then `Contract` is returned and if `R` is any type of
/// `Result<(), E>` then `Result<Contract, E>` is returned.
/// Other return types for `F` than the ones listed above are not allowed.
#[inline]
pub fn initialize_contract<Contract, F, R>(
initializer: F,
) -> <R as InitializerReturnType<Contract>>::Wrapped
where
Contract: ContractRootKey + SpreadAllocate,
F: FnOnce(&mut Contract) -> R,
R: InitializerReturnType<Contract>,
{
let mut key_ptr = KeyPtr::from(<Contract as ContractRootKey>::ROOT_KEY);
let mut instance = <Contract as SpreadAllocate>::allocate_spread(&mut key_ptr);
let result = initializer(&mut instance);
result.into_wrapped(instance)
}
mod private {
/// Seals the implementation of `ContractInitializerReturnType`.
pub trait Sealed {}
impl Sealed for () {}
impl<T, E> Sealed for Result<T, E> {}
/// A thin-wrapper type that automatically seals its inner type.
///
/// Since it is private it can only be used from within this crate.
/// We need this type in order to properly seal the `ConstructorReturnType`
/// trait from unwanted external trait implementations.
#[repr(transparent)]
pub struct Seal<T>(pub T);
impl<T> Sealed for Seal<T> {}
}
/// Guards against using invalid contract initializer types.
///
/// # Note
///
/// Currently the only allowed types are `()` and `Result<(), E>`
/// where `E` is some unspecified error type.
/// If the contract initializer returns `Result::Err` the utility
/// method that is used to initialize an ink! smart contract will
/// revert the state of the contract instantiation.
pub trait ConstructorReturnType<C>: private::Sealed {
/// Is `true` if `Self` is `Result<C, E>`.
const IS_RESULT: bool = false;
/// The error type of the constructor return type.
///
/// # Note
///
/// For infallible constructors this is `core::convert::Infallible`.
type Error;
/// The type of the return value of the constructor.
///
/// # Note
///
/// For infallible constructors this is `()` whereas for fallible
/// constructors this is the actual return value. Since we only ever
/// return a value in case of `Result::Err` the `Result::Ok` value
/// does not matter.
type ReturnValue;
/// Converts the return value into a `Result` instance.
///
/// # Note
///
/// For infallible constructor returns this always yields `Ok`.
fn as_result(&self) -> Result<&C, &Self::Error>;
/// Returns the actual return value of the constructor.
///
/// # Note
///
/// For infallible constructor returns this always yields `()`
/// and is basically ignored since this does not get called
/// if the constructor did not fail.
fn return_value(&self) -> &Self::ReturnValue;
}
impl<C> ConstructorReturnType<C> for private::Seal<C> {
type Error = Infallible;
type ReturnValue = ();
#[inline]
fn as_result(&self) -> Result<&C, &Self::Error> {
Ok(&self.0)
}
#[inline]
fn return_value(&self) -> &Self::ReturnValue {
&()
}
}
impl<C, E> ConstructorReturnType<C> for private::Seal<Result<C, E>> {
const IS_RESULT: bool = true;
type Error = E;
type ReturnValue = Result<C, E>;
#[inline]
fn as_result(&self) -> Result<&C, &Self::Error> {
self.0.as_ref()
}
#[inline]
fn return_value(&self) -> &Self::ReturnValue {
&self.0
}
}
/// Trait used to convert return types of contract initializer routines.
///
/// Only `()` and `Result<(), E>` are allowed contract initializer return types.
/// For `WrapReturnType<C>` where `C` is the contract type the trait converts
/// `()` into `C` and `Result<(), E>` into `Result<C, E>`.
pub trait InitializerReturnType<C>: private::Sealed {
type Wrapped;
/// Performs the type conversion of the initialization routine return type.
fn into_wrapped(self, wrapped: C) -> Self::Wrapped;
}
impl<C> InitializerReturnType<C> for () {
type Wrapped = C;
#[inline]
fn into_wrapped(self, wrapped: C) -> C {
wrapped
}
}
impl<C, E> InitializerReturnType<C> for Result<(), E> {
type Wrapped = Result<C, E>;
#[inline]
fn into_wrapped(self, wrapped: C) -> Self::Wrapped {
self.map(|_| wrapped)
}
}
/// Configuration for execution of ink! messages.
#[derive(Debug, Copy, Clone)]
pub struct ExecuteMessageConfig {
/// Yields `true` if the ink! message accepts payment.
///
/// # Note
///
/// If no ink! message within the same ink! smart contract
/// is payable then this flag will be `true` since the check
/// then is moved before the message dispatch as an optimization.
pub payable: bool,
/// Yields `true` if the ink! message might mutate contract storage.
///
/// # Note
///
/// This is usually true for `&mut self` ink! messages.
pub mutates: bool,
/// Yields `true` if the dynamic storage allocator has been enabled.
///
/// # Note
///
/// Authors can enable it via `#[ink::contract(dynamic_storage_allocator = true)]`.
pub dynamic_storage_alloc: bool,
}
/// Initiates an ink! message call with the given configuration.
///
/// Returns the contract state pulled from the root storage region upon success.
///
/// # Note
///
/// This work around that splits executing an ink! message into initiate
/// and finalize phases was needed due to the fact that `is_result_type`
/// and `is_result_err` macros do not work in generic contexts.
#[inline]
pub fn initiate_message<Contract>(
config: ExecuteMessageConfig,
) -> Result<Contract, DispatchError>
where
Contract: SpreadLayout + ContractEnv,
{
if !config.payable {
deny_payment::<<Contract as ContractEnv>::Env>()?;
}
if config.dynamic_storage_alloc {
alloc::initialize(ContractPhase::Call);
}
let root_key = Key::from([0x00; 32]);
let contract = pull_spread_root::<Contract>(&root_key);
Ok(contract)
}
/// Finalizes an ink! message call with the given configuration.
///
/// This dispatches into fallible and infallible message finalization
/// depending on the given `success` state.
///
/// - If the message call was successful the return value is simply returned
/// and cached storage is pushed back to the contract storage.
/// - If the message call failed the return value result is returned instead
/// and the transaction is signalled to be reverted.
///
/// # Note
///
/// This work around that splits executing an ink! message into initiate
/// and finalize phases was needed due to the fact that `is_result_type`
/// and `is_result_err` macros do not work in generic contexts.
#[inline]
pub fn finalize_message<Contract, R>(
success: bool,
contract: &Contract,
config: ExecuteMessageConfig,
result: &R,
) -> Result<(), DispatchError>
where
Contract: SpreadLayout,
R: scale::Encode + 'static,
{
if success {
finalize_infallible_message(contract, config, result)
} else {
finalize_fallible_message(result)
}
}
#[inline]
fn finalize_infallible_message<Contract, R>(
contract: &Contract,
config: ExecuteMessageConfig,
result: &R,
) -> Result<(), DispatchError>
where
Contract: SpreadLayout,
R: scale::Encode + 'static,
{
if config.mutates {
let root_key = Key::from([0x00; 32]);
push_spread_root::<Contract>(contract, &root_key);
}
if config.dynamic_storage_alloc | {
alloc::finalize();
} | conditional_block | |
execution.rs | /// # Note
///
/// - This is the key where storage allocation, pushing and pulling is rooted
/// using the `SpreadLayout` and `SpreadAllocate` traits primarily.
/// - This trait is automatically implemented by the ink! codegen.
/// - The existence of this trait allows to customize the root key in future
/// versions of ink! if needed.
pub trait ContractRootKey {
const ROOT_KEY: Key;
}
/// Returns `Ok` if the caller did not transfer additional value to the callee.
///
/// # Errors
///
/// If the caller did send some amount of transferred value to the callee.
#[inline]
pub fn deny_payment<E>() -> Result<(), DispatchError>
where
E: Environment,
{
let transferred = ink_env::transferred_balance::<E>();
if transferred != <E as Environment>::Balance::from(0_u32) {
return Err(DispatchError::PaidUnpayableMessage)
}
Ok(())
}
/// Configuration for execution of ink! constructor.
#[derive(Debug, Copy, Clone)]
pub struct ExecuteConstructorConfig {
/// Yields `true` if the dynamic storage allocator has been enabled.
///
/// # Note
///
/// Authors can enable it via `#[ink::contract(dynamic_storage_allocator = true)]`.
pub dynamic_storage_alloc: bool,
}
/// Executes the given ink! constructor.
///
/// # Note
///
/// The closure is supposed to already contain all the arguments that the real
/// constructor message requires and forwards them.
#[inline]
pub fn execute_constructor<Contract, F, R>(
config: ExecuteConstructorConfig,
f: F,
) -> Result<(), DispatchError>
where
Contract: SpreadLayout + ContractRootKey,
F: FnOnce() -> R,
<private::Seal<R> as ConstructorReturnType<Contract>>::ReturnValue: scale::Encode,
private::Seal<R>: ConstructorReturnType<Contract>,
{
if config.dynamic_storage_alloc {
alloc::initialize(ContractPhase::Deploy);
}
let result = ManuallyDrop::new(private::Seal(f()));
match result.as_result() {
Ok(contract) => {
// Constructor is infallible or is fallible but succeeded.
//
// This requires us to sync back the changes of the contract storage.
let root_key = <Contract as ContractRootKey>::ROOT_KEY;
push_spread_root::<Contract>(contract, &root_key);
if config.dynamic_storage_alloc {
alloc::finalize();
}
Ok(())
}
Err(_) => {
// Constructor is fallible and failed.
//
// We need to revert the state of the transaction.
ink_env::return_value::<
<private::Seal<R> as ConstructorReturnType<Contract>>::ReturnValue,
>(
ReturnFlags::default().set_reverted(true),
result.return_value(),
)
}
}
}
/// Initializes the ink! contract using the given initialization routine.
///
/// # Note
///
/// - This uses `SpreadAllocate` trait in order to default initialize the
/// ink! smart contract before calling the initialization routine.
/// - This either returns `Contract` or `Result<Contract, E>` depending
/// on the return type `R` of the initializer closure `F`.
/// If `R` is `()` then `Contract` is returned and if `R` is any type of
/// `Result<(), E>` then `Result<Contract, E>` is returned.
/// Other return types for `F` than the ones listed above are not allowed.
#[inline]
pub fn | <Contract, F, R>(
initializer: F,
) -> <R as InitializerReturnType<Contract>>::Wrapped
where
Contract: ContractRootKey + SpreadAllocate,
F: FnOnce(&mut Contract) -> R,
R: InitializerReturnType<Contract>,
{
let mut key_ptr = KeyPtr::from(<Contract as ContractRootKey>::ROOT_KEY);
let mut instance = <Contract as SpreadAllocate>::allocate_spread(&mut key_ptr);
let result = initializer(&mut instance);
result.into_wrapped(instance)
}
mod private {
/// Seals the implementation of `ContractInitializerReturnType`.
pub trait Sealed {}
impl Sealed for () {}
impl<T, E> Sealed for Result<T, E> {}
/// A thin-wrapper type that automatically seals its inner type.
///
/// Since it is private it can only be used from within this crate.
/// We need this type in order to properly seal the `ConstructorReturnType`
/// trait from unwanted external trait implementations.
#[repr(transparent)]
pub struct Seal<T>(pub T);
impl<T> Sealed for Seal<T> {}
}
/// Guards against using invalid contract initializer types.
///
/// # Note
///
/// Currently the only allowed types are `()` and `Result<(), E>`
/// where `E` is some unspecified error type.
/// If the contract initializer returns `Result::Err` the utility
/// method that is used to initialize an ink! smart contract will
/// revert the state of the contract instantiation.
pub trait ConstructorReturnType<C>: private::Sealed {
/// Is `true` if `Self` is `Result<C, E>`.
const IS_RESULT: bool = false;
/// The error type of the constructor return type.
///
/// # Note
///
/// For infallible constructors this is `core::convert::Infallible`.
type Error;
/// The type of the return value of the constructor.
///
/// # Note
///
/// For infallible constructors this is `()` whereas for fallible
/// constructors this is the actual return value. Since we only ever
/// return a value in case of `Result::Err` the `Result::Ok` value
/// does not matter.
type ReturnValue;
/// Converts the return value into a `Result` instance.
///
/// # Note
///
/// For infallible constructor returns this always yields `Ok`.
fn as_result(&self) -> Result<&C, &Self::Error>;
/// Returns the actual return value of the constructor.
///
/// # Note
///
/// For infallible constructor returns this always yields `()`
/// and is basically ignored since this does not get called
/// if the constructor did not fail.
fn return_value(&self) -> &Self::ReturnValue;
}
impl<C> ConstructorReturnType<C> for private::Seal<C> {
type Error = Infallible;
type ReturnValue = ();
#[inline]
fn as_result(&self) -> Result<&C, &Self::Error> {
Ok(&self.0)
}
#[inline]
fn return_value(&self) -> &Self::ReturnValue {
&()
}
}
impl<C, E> ConstructorReturnType<C> for private::Seal<Result<C, E>> {
const IS_RESULT: bool = true;
type Error = E;
type ReturnValue = Result<C, E>;
#[inline]
fn as_result(&self) -> Result<&C, &Self::Error> {
self.0.as_ref()
}
#[inline]
fn return_value(&self) -> &Self::ReturnValue {
&self.0
}
}
/// Trait used to convert return types of contract initializer routines.
///
/// Only `()` and `Result<(), E>` are allowed contract initializer return types.
/// For `WrapReturnType<C>` where `C` is the contract type the trait converts
/// `()` into `C` and `Result<(), E>` into `Result<C, E>`.
pub trait InitializerReturnType<C>: private::Sealed {
type Wrapped;
/// Performs the type conversion of the initialization routine return type.
fn into_wrapped(self, wrapped: C) -> Self::Wrapped;
}
impl<C> InitializerReturnType<C> for () {
type Wrapped = C;
#[inline]
fn into_wrapped(self, wrapped: C) -> C {
wrapped
}
}
impl<C, E> InitializerReturnType<C> for Result<(), E> {
type Wrapped = Result<C, E>;
#[inline]
fn into_wrapped(self, wrapped: C) -> Self::Wrapped {
self.map(|_| wrapped)
}
}
/// Configuration for execution of ink! messages.
#[derive(Debug, Copy, Clone)]
pub struct ExecuteMessageConfig {
/// Yields `true` if the ink! message accepts payment.
///
/// # Note
///
/// If no ink! message within the same ink! smart contract
/// is payable then this flag will be `true` since the check
/// then is moved before the message dispatch as an optimization.
pub payable: bool,
/// Yields `true` if the ink! message might mutate contract storage.
///
/// # Note
///
/// This is usually true for `&mut self` ink! messages.
pub mutates: bool,
/// Yields `true` if the dynamic storage allocator has been enabled.
///
/// # Note
///
/// Authors can enable it via `#[ink::contract(dynamic_storage_allocator = true)]`.
pub dynamic_storage_alloc: bool,
}
/// Initiates an ink! message call with the given configuration.
///
/// Returns the contract state pulled from the root storage region upon success.
///
/// # Note
///
/// This work around that splits executing an ink! message into initiate
/// and finalize phases was needed due to the fact that `is_result_type`
/// and `is_result_err` macros do not work in generic contexts.
#[inline]
pub fn initiate_message<Contract>(
config: ExecuteMessage | initialize_contract | identifier_name |
execution.rs | ///
/// # Note
///
/// - This is the key where storage allocation, pushing and pulling is rooted
/// using the `SpreadLayout` and `SpreadAllocate` traits primarily.
/// - This trait is automatically implemented by the ink! codegen.
/// - The existence of this trait allows to customize the root key in future
/// versions of ink! if needed.
pub trait ContractRootKey {
const ROOT_KEY: Key;
}
/// Returns `Ok` if the caller did not transfer additional value to the callee.
///
/// # Errors
///
/// If the caller did send some amount of transferred value to the callee.
#[inline]
pub fn deny_payment<E>() -> Result<(), DispatchError>
where
E: Environment,
{
let transferred = ink_env::transferred_balance::<E>();
if transferred != <E as Environment>::Balance::from(0_u32) {
return Err(DispatchError::PaidUnpayableMessage)
}
Ok(())
}
/// Configuration for execution of ink! constructor.
#[derive(Debug, Copy, Clone)]
pub struct ExecuteConstructorConfig {
/// Yields `true` if the dynamic storage allocator has been enabled.
///
/// # Note
///
/// Authors can enable it via `#[ink::contract(dynamic_storage_allocator = true)]`.
pub dynamic_storage_alloc: bool,
}
/// Executes the given ink! constructor.
///
/// # Note
///
/// The closure is supposed to already contain all the arguments that the real
/// constructor message requires and forwards them.
#[inline]
pub fn execute_constructor<Contract, F, R>(
config: ExecuteConstructorConfig,
f: F,
) -> Result<(), DispatchError>
where
Contract: SpreadLayout + ContractRootKey,
F: FnOnce() -> R,
<private::Seal<R> as ConstructorReturnType<Contract>>::ReturnValue: scale::Encode,
private::Seal<R>: ConstructorReturnType<Contract>,
{
if config.dynamic_storage_alloc {
alloc::initialize(ContractPhase::Deploy);
}
let result = ManuallyDrop::new(private::Seal(f()));
match result.as_result() {
Ok(contract) => {
// Constructor is infallible or is fallible but succeeded.
//
// This requires us to sync back the changes of the contract storage.
let root_key = <Contract as ContractRootKey>::ROOT_KEY;
push_spread_root::<Contract>(contract, &root_key);
if config.dynamic_storage_alloc {
alloc::finalize();
}
Ok(())
}
Err(_) => {
// Constructor is fallible and failed.
//
// We need to revert the state of the transaction.
ink_env::return_value::<
<private::Seal<R> as ConstructorReturnType<Contract>>::ReturnValue,
>(
ReturnFlags::default().set_reverted(true),
result.return_value(),
)
}
}
}
/// Initializes the ink! contract using the given initialization routine.
///
/// # Note
///
/// - This uses `SpreadAllocate` trait in order to default initialize the
/// ink! smart contract before calling the initialization routine.
/// - This either returns `Contract` or `Result<Contract, E>` depending
/// on the return type `R` of the initializer closure `F`.
/// If `R` is `()` then `Contract` is returned and if `R` is any type of
/// `Result<(), E>` then `Result<Contract, E>` is returned.
/// Other return types for `F` than the ones listed above are not allowed.
#[inline]
pub fn initialize_contract<Contract, F, R>(
initializer: F,
) -> <R as InitializerReturnType<Contract>>::Wrapped
where
Contract: ContractRootKey + SpreadAllocate,
F: FnOnce(&mut Contract) -> R,
R: InitializerReturnType<Contract>,
{
let mut key_ptr = KeyPtr::from(<Contract as ContractRootKey>::ROOT_KEY);
let mut instance = <Contract as SpreadAllocate>::allocate_spread(&mut key_ptr);
let result = initializer(&mut instance);
result.into_wrapped(instance)
}
mod private {
/// Seals the implementation of `ContractInitializerReturnType`.
pub trait Sealed {}
impl Sealed for () {}
impl<T, E> Sealed for Result<T, E> {}
/// A thin-wrapper type that automatically seals its inner type.
///
/// Since it is private it can only be used from within this crate.
/// We need this type in order to properly seal the `ConstructorReturnType`
/// trait from unwanted external trait implementations.
#[repr(transparent)]
pub struct Seal<T>(pub T);
impl<T> Sealed for Seal<T> {}
}
/// Guards against using invalid contract initializer types.
///
/// # Note
///
/// Currently the only allowed types are `()` and `Result<(), E>` | /// where `E` is some unspecified error type.
/// If the contract initializer returns `Result::Err` the utility
/// method that is used to initialize an ink! smart contract will
/// revert the state of the contract instantiation.
pub trait ConstructorReturnType<C>: private::Sealed {
/// Is `true` if `Self` is `Result<C, E>`.
const IS_RESULT: bool = false;
/// The error type of the constructor return type.
///
/// # Note
///
/// For infallible constructors this is `core::convert::Infallible`.
type Error;
/// The type of the return value of the constructor.
///
/// # Note
///
/// For infallible constructors this is `()` whereas for fallible
/// constructors this is the actual return value. Since we only ever
/// return a value in case of `Result::Err` the `Result::Ok` value
/// does not matter.
type ReturnValue;
/// Converts the return value into a `Result` instance.
///
/// # Note
///
/// For infallible constructor returns this always yields `Ok`.
fn as_result(&self) -> Result<&C, &Self::Error>;
/// Returns the actual return value of the constructor.
///
/// # Note
///
/// For infallible constructor returns this always yields `()`
/// and is basically ignored since this does not get called
/// if the constructor did not fail.
fn return_value(&self) -> &Self::ReturnValue;
}
impl<C> ConstructorReturnType<C> for private::Seal<C> {
type Error = Infallible;
type ReturnValue = ();
#[inline]
fn as_result(&self) -> Result<&C, &Self::Error> {
Ok(&self.0)
}
#[inline]
fn return_value(&self) -> &Self::ReturnValue {
&()
}
}
impl<C, E> ConstructorReturnType<C> for private::Seal<Result<C, E>> {
const IS_RESULT: bool = true;
type Error = E;
type ReturnValue = Result<C, E>;
#[inline]
fn as_result(&self) -> Result<&C, &Self::Error> {
self.0.as_ref()
}
#[inline]
fn return_value(&self) -> &Self::ReturnValue {
&self.0
}
}
/// Trait used to convert return types of contract initializer routines.
///
/// Only `()` and `Result<(), E>` are allowed contract initializer return types.
/// For `WrapReturnType<C>` where `C` is the contract type the trait converts
/// `()` into `C` and `Result<(), E>` into `Result<C, E>`.
pub trait InitializerReturnType<C>: private::Sealed {
type Wrapped;
/// Performs the type conversion of the initialization routine return type.
fn into_wrapped(self, wrapped: C) -> Self::Wrapped;
}
impl<C> InitializerReturnType<C> for () {
type Wrapped = C;
#[inline]
fn into_wrapped(self, wrapped: C) -> C {
wrapped
}
}
impl<C, E> InitializerReturnType<C> for Result<(), E> {
type Wrapped = Result<C, E>;
#[inline]
fn into_wrapped(self, wrapped: C) -> Self::Wrapped {
self.map(|_| wrapped)
}
}
/// Configuration for execution of ink! messages.
#[derive(Debug, Copy, Clone)]
pub struct ExecuteMessageConfig {
/// Yields `true` if the ink! message accepts payment.
///
/// # Note
///
/// If no ink! message within the same ink! smart contract
/// is payable then this flag will be `true` since the check
/// then is moved before the message dispatch as an optimization.
pub payable: bool,
/// Yields `true` if the ink! message might mutate contract storage.
///
/// # Note
///
/// This is usually true for `&mut self` ink! messages.
pub mutates: bool,
/// Yields `true` if the dynamic storage allocator has been enabled.
///
/// # Note
///
/// Authors can enable it via `#[ink::contract(dynamic_storage_allocator = true)]`.
pub dynamic_storage_alloc: bool,
}
/// Initiates an ink! message call with the given configuration.
///
/// Returns the contract state pulled from the root storage region upon success.
///
/// # Note
///
/// This work around that splits executing an ink! message into initiate
/// and finalize phases was needed due to the fact that `is_result_type`
/// and `is_result_err` macros do not work in generic contexts.
#[inline]
pub fn initiate_message<Contract>(
config: ExecuteMessage | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.