file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
modes.go | /*
Copyright 2021 Sonobuoy Contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"fmt"
"regexp"
"sort"
"strings"
"github.com/spf13/cobra"
)
type e2eModeOptions struct {
name string
desc string
focus, skip string
parallel bool
}
const (
// E2eModeQuick runs a single E2E test and the systemd log tests.
E2eModeQuick string = "quick"
// E2eModeNonDisruptiveConformance runs all of the `Conformance` E2E tests which are not marked as disuprtive and the systemd log tests.
E2eModeNonDisruptiveConformance string = "non-disruptive-conformance"
// E2eModeCertifiedConformance runs all of the `Conformance` E2E tests and the systemd log tests.
E2eModeCertifiedConformance string = "certified-conformance"
// nonDisruptiveSkipList should generally just need to skip disruptive tests since upstream
// will disallow the other types of tests from being tagged as Conformance. However, in v1.16
// two disruptive tests were not marked as such, meaning we needed to specify them here to ensure
// user workload safety. See https://github.com/kubernetes/kubernetes/issues/82663
// and https://github.com/kubernetes/kubernetes/issues/82787
nonDisruptiveSkipList = `\[Disruptive\]|NoExecuteTaintManager`
conformanceFocus = `\[Conformance\]`
quickFocus = "Pods should be submitted and removed"
E2eModeConformanceLite = "conformance-lite"
)
var (
liteSkips = []string{
"Serial", "Slow", "Disruptive",
"[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should have a working scale subresource [Conformance]",
"[sig-network] EndpointSlice should create Endpoints and EndpointSlices for Pods matching a Service [Conformance]",
"[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of same group and version but different kinds [Conformance]",
"[sig-auth] ServiceAccounts ServiceAccountIssuerDiscovery should support OIDC discovery of service account issuer [Conformance]",
"[sig-network] DNS should provide DNS for services [Conformance]",
"[sig-network] DNS should resolve DNS of partial qualified names for services [LinuxOnly] [Conformance]",
"[sig-apps] Job should delete a job [Conformance]",
"[sig-network] DNS should provide DNS for ExternalName services [Conformance]",
"[sig-node] Variable Expansion should succeed in writing subpaths in container [Slow] [Conformance]",
"[sig-apps] Daemon set [Serial] should rollback without unnecessary restarts [Conformance]",
"[sig-api-machinery] Garbage collector should orphan pods created by rc if delete options say so [Conformance]",
"[sig-network] Services should have session affinity timeout work for service with type clusterIP [LinuxOnly] [Conformance]",
"[sig-network] Services should have session affinity timeout work for NodePort service [LinuxOnly] [Conformance]",
"[sig-node] InitContainer [NodeConformance] should not start app containers if init containers fail on a RestartAlways pod [Conformance]",
"[sig-apps] Daemon set [Serial] should update pod when spec was updated and update strategy is RollingUpdate [Conformance]",
"[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of same group but different versions [Conformance]",
"[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance]",
`[sig-node] Probing container should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]`,
"[sig-network] Services should be able to switch session affinity for service with type clusterIP [LinuxOnly] [Conformance]",
"[sig-node] Probing container with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]",
"[sig-api-machinery] Watchers should observe add, update, and delete watch notifications on configmaps [Conformance]",
"[sig-scheduling] SchedulerPreemption [Serial] PriorityClass endpoints verify PriorityClass endpoints can be operated with different HTTP methods [Conformance]",
"[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition listing custom resource definition objects works [Conformance]",
"[sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] CustomResourceDefinition Watch watch on custom resource definition objects [Conformance]",
"[sig-scheduling] SchedulerPreemption [Serial] validates basic preemption works [Conformance]",
"[sig-storage] ConfigMap optional updates should be reflected in volume [NodeConformance] [Conformance]",
"[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance]",
"[sig-storage] EmptyDir wrapper volumes should not cause race condition when used for configmaps [Serial] [Conformance]",
"[sig-scheduling] SchedulerPreemption [Serial] validates lower priority pod preemption by critical pod [Conformance]",
"[sig-storage] Projected secret optional updates should be reflected in volume [NodeConformance] [Conformance]",
"[sig-apps] CronJob should schedule multiple jobs concurrently [Conformance]",
"[sig-apps] CronJob should replace jobs when ReplaceConcurrent [Conformance]",
"[sig-scheduling] SchedulerPreemption [Serial] PreemptionExecutionPath runs ReplicaSets to verify preemption running path [Conformance]",
"[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform canary updates and phased rolling updates of template modifications [Conformance]",
"[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform rolling updates and roll backs of template modifications [Conformance]",
"[sig-node] Probing container should have monotonically increasing restart count [NodeConformance] [Conformance]",
"[sig-node] Variable Expansion should verify that a failing subpath expansion can be modified during the lifecycle of a container [Slow] [Conformance]",
`[sig-node] Probing container should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]`,
"[sig-node] Probing container should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance]",
"[sig-node] Probing container should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]",
"[sig-apps] CronJob should not schedule jobs when suspended [Slow] [Conformance]",
"[sig-scheduling] SchedulerPredicates [Serial] validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP [Conformance]",
"[sig-apps] CronJob should not schedule new jobs when ForbidConcurrent [Slow] [Conformance]",
`[k8s.io] Probing container should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]`,
`[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] should perform canary updates and phased rolling updates of template modifications [Conformance]`,
`[sig-storage] ConfigMap updates should be reflected in volume [NodeConformance] [Conformance]`,
`[sig-network] Services should be able to switch session affinity for NodePort service [LinuxOnly] [Conformance]`,
`[k8s.io] Probing container with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]`,
`[sig-storage] Projected configMap optional updates should be reflected in volume [NodeConformance] [Conformance]`,
`[k8s.io] Probing container should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]`,
`[sig-api-machinery] Garbage collector should delete RS created by deployment when not orphaning [Conformance]`,
`[sig-api-machinery] Garbage collector should delete pods created by rc when not orphaning [Conformance]`,
`[k8s.io] Probing container should have monotonically increasing restart count [NodeConformance] [Conformance]`,
`[k8s.io] Probing container should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance]`,
`[sig-api-machinery] Garbage collector should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance]`,
`[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] should perform rolling updates and roll backs of template modifications [Conformance]`,
}
)
// validModes is a map of the various valid modes. Name is duplicated as the key and in the e2eModeOptions itself.
var validModes = map[string]e2eModeOptions{
E2eModeQuick: {
name: E2eModeQuick, focus: quickFocus,
desc: "Quick mode runs a single test to create and destroy a pod. Fastest way to check basic cluster operation.",
},
E2eModeNonDisruptiveConformance: {
name: E2eModeNonDisruptiveConformance, focus: conformanceFocus, skip: nonDisruptiveSkipList,
desc: "Non-destructive conformance mode runs all of the conformance tests except those that would disrupt other cluster operations (e.g. tests that may cause nodes to be restarted or impact cluster permissions).",
},
E2eModeCertifiedConformance: {
name: E2eModeCertifiedConformance, focus: conformanceFocus,
desc: "Certified conformance mode runs the entire conformance suite, even disruptive tests. This is typically run in a dev environment to earn the CNCF Certified Kubernetes status.",
},
E2eModeConformanceLite: {
name: E2eModeConformanceLite, focus: conformanceFocus, skip: genLiteSkips(), parallel: true,
desc: "An unofficial mode of running the e2e tests which removes some of the longest running tests so that your tests can complete in the fastest time possible while maximizing coverage.", | }
func genLiteSkips() string {
quoted := make([]string, len(liteSkips))
for i, v := range liteSkips {
quoted[i] = regexp.QuoteMeta(v)
// Quotes will cause the regexp to explode; easy to just change them to wildcards without an issue.
quoted[i] = strings.ReplaceAll(quoted[i], `"`, ".")
}
return strings.Join(quoted, "|")
}
func validE2eModes() []string {
keys := []string{}
for key := range validModes {
keys = append(keys, key)
}
sort.Strings(keys)
return keys
}
type modesOptions struct {
verbose bool
}
func NewCmdModes() *cobra.Command {
f := modesOptions{}
var modesCmd = &cobra.Command{
Use: "modes",
Short: "Display the various modes in which to run the e2e plugin",
Run: func(cmd *cobra.Command, args []string) {
showModes(f)
},
Args: cobra.ExactArgs(0),
}
modesCmd.Flags().BoolVar(&f.verbose, "verbose", false, "Do not truncate output for each mode.")
return modesCmd
}
func showModes(opt modesOptions) {
count := 0
if !opt.verbose {
count = 200
}
for i, key := range validE2eModes() {
opt := validModes[key]
if i != 0 {
fmt.Println("")
}
fmt.Println(truncate(fmt.Sprintf("Mode: %v", opt.name), count))
fmt.Println(truncate(fmt.Sprintf("Description: %v", opt.desc), count))
fmt.Println(truncate(fmt.Sprintf("E2E_FOCUS: %v", opt.focus), count))
fmt.Println(truncate(fmt.Sprintf("E2E_SKIP: %v", opt.skip), count))
fmt.Println(truncate(fmt.Sprintf("E2E_PARALLEL: %v", opt.parallel), count))
}
}
func truncate(s string, count int) string {
if count <= 0 {
return s
}
if len(s) <= count {
return s
}
return s[0:count] + "... (truncated) ..."
} | }, | random_line_split |
parser.py | #--------------------------------------------------------------------------------------------------#
# hsd-python: package for manipulating HSD-formatted data in Python #
# Copyright (C) 2011 - 2023 DFTB+ developers group #
# Licensed under the BSD 2-clause license. #
#--------------------------------------------------------------------------------------------------#
#
"""
Contains the event-generating HSD-parser.
"""
from typing import Optional, TextIO, Union
from hsd import common
from hsd.eventhandler import HsdEventHandler, HsdEventPrinter
SYNTAX_ERROR = 1
UNCLOSED_TAG_ERROR = 2
UNCLOSED_ATTRIB_ERROR = 3
UNCLOSED_QUOTATION_ERROR = 4
ORPHAN_TEXT_ERROR = 5
_GENERAL_SPECIALS = "{}[]<=\"'#;"
_ATTRIB_SPECIALS = "]\"'"
class HsdParser:
"""Event based parser for the HSD format.
Arguments:
eventhandler: Object which should handle the HSD-events triggered
during parsing. When not specified, HsdEventPrinter() is used.
Examples:
>>> from io import StringIO
>>> dictbuilder = hsd.HsdDictBuilder()
>>> parser = hsd.HsdParser(eventhandler=dictbuilder)
>>> hsdfile = StringIO(\"\"\"
... Hamiltonian {
... Dftb {
... Scc = Yes
... Filling = Fermi {
... Temperature [Kelvin] = 100
... }
... }
... }
... \"\"\")
>>> parser.parse(hsdfile)
>>> dictbuilder.hsddict
{'Hamiltonian': {'Dftb': {'Scc': True, 'Filling': {'Fermi':
{'Temperature': 100, 'Temperature.attrib': 'Kelvin'}}}}}
"""
def __init__(self, eventhandler: Optional[HsdEventHandler] = None):
"""Initializes the parser.
Args:
eventhandler: Instance of the HsdEventHandler class or its children.
"""
if eventhandler is None:
self._eventhandler = HsdEventPrinter()
else:
self._eventhandler = eventhandler
self._fname = "" # name of file being processed
self._checkstr = _GENERAL_SPECIALS # special characters to look for
self._oldcheckstr = "" # buffer fo checkstr
self._opened_tags = [] # info about opened tags
self._buffer = [] # buffering plain text between lines
self._attrib = None # attribute for current tag
self._hsdattrib = {} # hsd-options for current tag
self._currline = 0 # nr. of current line in file
self._after_equal_sign = False # last tag was opened with equal sign
self._inside_attrib = False # parser inside attrib specification
self._inside_quote = False # parser inside quotation
self._has_child = True # Whether current node has a child already
self._has_text = False # whether current node contains text already
self._oldbefore = "" # buffer for tagname
def parse(self, fobj: Union[TextIO, str]):
"""Parses the provided file-like object.
The parser will process the data and trigger the corresponding events
in the eventhandler which was passed at initialization.
Args:
fobj: File like object or name of a file containing the data.
"""
isfilename = isinstance(fobj, str)
if isfilename:
fp = open(fobj, "r")
self._fname = fobj
else:
fp = fobj
for line in fp.readlines():
self._parse(line)
self._currline += 1
if isfilename:
fp.close()
# Check for errors
if self._opened_tags:
line0 = self._opened_tags[-1][1]
else:
line0 = 0
if self._inside_quote:
self._error(UNCLOSED_QUOTATION_ERROR, (line0, self._currline))
elif self._inside_attrib:
self._error(UNCLOSED_ATTRIB_ERROR, (line0, self._currline))
elif self._opened_tags:
self._error(UNCLOSED_TAG_ERROR, (line0, line0))
elif ("".join(self._buffer)).strip():
self._error(ORPHAN_TEXT_ERROR, (line0, self._currline))
def _parse(self, line):
"""Parses a given line."""
while True:
sign, before, after = _splitbycharset(line, self._checkstr)
# End of line
if not sign:
if self._inside_quote:
self._buffer.append(before)
elif self._after_equal_sign:
self._text("".join(self._buffer) + before.strip())
self._closetag()
self._after_equal_sign = False
elif not self._inside_attrib:
self._buffer.append(before)
elif before.strip():
self._error(SYNTAX_ERROR, (self._currline, self._currline))
break
# Special character is escaped
elif before.endswith("\\") and not before.endswith("\\\\"):
self._buffer.append(before + sign)
# Equal sign
elif sign == "=":
# Ignore if followed by "{" (DFTB+ compatibility)
if after.lstrip().startswith("{"): # _oldbefore may already contain the tagname, if the # tagname was followed by an attribute -> append
self._oldbefore += before
else:
self._hsdattrib[common.HSD_ATTRIB_EQUAL] = True
self._starttag(before, False)
self._after_equal_sign = True
# Opening tag by curly brace
elif sign == "{":
#self._has_child = True
self._hsdattrib[common.HSD_ATTRIB_EQUAL] = False
self._starttag(before, self._after_equal_sign)
self._buffer = []
self._after_equal_sign = False
# Closing tag by curly brace
elif sign == "}":
self._text("".join(self._buffer) + before)
self._buffer = []
# If 'test { a = 12 }' occurs, curly brace closes two tags
if self._after_equal_sign:
self._after_equal_sign = False
self._closetag()
self._closetag()
# Closing tag by semicolon
elif sign == ";" and self._after_equal_sign:
self._after_equal_sign = False
self._text(before)
self._closetag()
# Comment line
elif sign == "#":
self._buffer.append(before)
after = ""
# Opening attribute specification
elif sign == "[":
if "".join(self._buffer).strip():
self._error(SYNTAX_ERROR, (self._currline, self._currline))
self._oldbefore = before
self._buffer = []
self._inside_attrib = True
self._opened_tags.append(("[", self._currline, None, None, None))
self._checkstr = _ATTRIB_SPECIALS
# Closing attribute specification
elif sign == "]":
value = "".join(self._buffer) + before
self._attrib = value.strip()
self._inside_attrib = False
self._buffer = []
self._opened_tags.pop()
self._checkstr = _GENERAL_SPECIALS
# Quoting strings
elif sign in ("'", '"'):
if self._inside_quote:
self._checkstr = self._oldcheckstr
self._inside_quote = False
self._buffer.append(before + sign)
self._opened_tags.pop()
else:
self._oldcheckstr = self._checkstr
self._checkstr = sign
self._inside_quote = True
self._buffer.append(before + sign)
self._opened_tags.append(('"', self._currline, None, None, None))
# Interrupt
elif sign == "<" and not self._after_equal_sign:
txtinc = after.startswith("<<")
hsdinc = after.startswith("<+")
if txtinc:
self._text("".join(self._buffer) + before)
self._buffer = []
self._eventhandler.add_text(self._include_txt(after[2:]))
break
if hsdinc:
self._include_hsd(after[2:])
break
self._buffer.append(before + sign)
else:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
line = after
def | (self, text):
stripped = text.strip()
if stripped:
if self._has_child:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
self._eventhandler.add_text(stripped)
self._has_text = True
def _starttag(self, tagname, closeprev):
txt = "".join(self._buffer)
if txt:
self._text(txt)
if self._has_text:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
tagname_stripped = tagname.strip()
if self._oldbefore:
if tagname_stripped:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
else:
tagname_stripped = self._oldbefore.strip()
if len(tagname_stripped.split()) > 1:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
self._hsdattrib[common.HSD_ATTRIB_LINE] = self._currline
self._eventhandler.open_tag(tagname_stripped, self._attrib,
self._hsdattrib)
self._opened_tags.append(
(tagname_stripped, self._currline, closeprev, True, False))
self._has_child = False
self._buffer = []
self._oldbefore = ""
self._attrib = None
self._hsdattrib = {}
def _closetag(self):
if not self._opened_tags:
self._error(SYNTAX_ERROR, (0, self._currline))
self._buffer = []
tag, _, closeprev, self._has_child, self._has_text = self._opened_tags.pop()
self._eventhandler.close_tag(tag)
if closeprev:
self._closetag()
def _include_hsd(self, fname):
fname = common.unquote(fname.strip())
parser = HsdParser(eventhandler=self._eventhandler)
parser.parse(fname)
@staticmethod
def _include_txt(fname):
fname = common.unquote(fname.strip())
with open(fname, "r") as fp:
txt = fp.read()
return txt
def _error(self, errorcode, lines):
error_msg = (
"Parsing error ({}) between lines {} - {} in file '{}'.".format(
errorcode, lines[0] + 1, lines[1] + 1, self._fname))
raise common.HsdError(error_msg)
def _splitbycharset(txt, charset):
"""Splits a string at the first occurrence of a character in a set.
Args:
txt: Text to split.
chars: Chars to look for.
Returns:
Tuple (char, before, after). Char is the character which had been found
(or empty string if nothing was found). Before is the substring before
the splitting character (or the entire string). After is the substring
after the splitting character (or empty string).
"""
for firstpos, char in enumerate(txt):
if char in charset:
return txt[firstpos], txt[:firstpos], txt[firstpos + 1:]
return '', txt, ''
| _text | identifier_name |
parser.py | #--------------------------------------------------------------------------------------------------#
# hsd-python: package for manipulating HSD-formatted data in Python #
# Copyright (C) 2011 - 2023 DFTB+ developers group #
# Licensed under the BSD 2-clause license. #
#--------------------------------------------------------------------------------------------------#
#
"""
Contains the event-generating HSD-parser.
"""
from typing import Optional, TextIO, Union
from hsd import common
from hsd.eventhandler import HsdEventHandler, HsdEventPrinter
SYNTAX_ERROR = 1
UNCLOSED_TAG_ERROR = 2
UNCLOSED_ATTRIB_ERROR = 3
UNCLOSED_QUOTATION_ERROR = 4
ORPHAN_TEXT_ERROR = 5
_GENERAL_SPECIALS = "{}[]<=\"'#;"
_ATTRIB_SPECIALS = "]\"'"
class HsdParser:
"""Event based parser for the HSD format.
Arguments:
eventhandler: Object which should handle the HSD-events triggered
during parsing. When not specified, HsdEventPrinter() is used.
Examples:
>>> from io import StringIO
>>> dictbuilder = hsd.HsdDictBuilder()
>>> parser = hsd.HsdParser(eventhandler=dictbuilder)
>>> hsdfile = StringIO(\"\"\"
... Hamiltonian {
... Dftb {
... Scc = Yes
... Filling = Fermi {
... Temperature [Kelvin] = 100
... }
... }
... }
... \"\"\")
>>> parser.parse(hsdfile)
>>> dictbuilder.hsddict
{'Hamiltonian': {'Dftb': {'Scc': True, 'Filling': {'Fermi':
{'Temperature': 100, 'Temperature.attrib': 'Kelvin'}}}}}
"""
def __init__(self, eventhandler: Optional[HsdEventHandler] = None):
"""Initializes the parser.
Args:
eventhandler: Instance of the HsdEventHandler class or its children.
"""
if eventhandler is None:
self._eventhandler = HsdEventPrinter()
else:
self._eventhandler = eventhandler
self._fname = "" # name of file being processed
self._checkstr = _GENERAL_SPECIALS # special characters to look for
self._oldcheckstr = "" # buffer fo checkstr
self._opened_tags = [] # info about opened tags
self._buffer = [] # buffering plain text between lines
self._attrib = None # attribute for current tag
self._hsdattrib = {} # hsd-options for current tag
self._currline = 0 # nr. of current line in file
self._after_equal_sign = False # last tag was opened with equal sign
self._inside_attrib = False # parser inside attrib specification
self._inside_quote = False # parser inside quotation
self._has_child = True # Whether current node has a child already
self._has_text = False # whether current node contains text already
self._oldbefore = "" # buffer for tagname
def parse(self, fobj: Union[TextIO, str]):
"""Parses the provided file-like object.
The parser will process the data and trigger the corresponding events
in the eventhandler which was passed at initialization.
Args:
fobj: File like object or name of a file containing the data.
"""
isfilename = isinstance(fobj, str)
if isfilename:
fp = open(fobj, "r")
self._fname = fobj
else:
fp = fobj
for line in fp.readlines():
self._parse(line)
self._currline += 1
if isfilename:
fp.close()
# Check for errors
if self._opened_tags:
line0 = self._opened_tags[-1][1]
else:
line0 = 0
if self._inside_quote:
self._error(UNCLOSED_QUOTATION_ERROR, (line0, self._currline))
elif self._inside_attrib:
self._error(UNCLOSED_ATTRIB_ERROR, (line0, self._currline))
elif self._opened_tags:
self._error(UNCLOSED_TAG_ERROR, (line0, line0))
elif ("".join(self._buffer)).strip():
self._error(ORPHAN_TEXT_ERROR, (line0, self._currline))
def _parse(self, line):
"""Parses a given line."""
while True:
sign, before, after = _splitbycharset(line, self._checkstr)
# End of line
if not sign:
if self._inside_quote:
self._buffer.append(before)
elif self._after_equal_sign:
self._text("".join(self._buffer) + before.strip())
self._closetag()
self._after_equal_sign = False
elif not self._inside_attrib:
self._buffer.append(before)
elif before.strip():
self._error(SYNTAX_ERROR, (self._currline, self._currline))
break
# Special character is escaped
elif before.endswith("\\") and not before.endswith("\\\\"):
self._buffer.append(before + sign)
# Equal sign
elif sign == "=":
# Ignore if followed by "{" (DFTB+ compatibility)
if after.lstrip().startswith("{"): # _oldbefore may already contain the tagname, if the # tagname was followed by an attribute -> append
self._oldbefore += before
else:
self._hsdattrib[common.HSD_ATTRIB_EQUAL] = True
self._starttag(before, False)
self._after_equal_sign = True
# Opening tag by curly brace
elif sign == "{":
#self._has_child = True
self._hsdattrib[common.HSD_ATTRIB_EQUAL] = False
self._starttag(before, self._after_equal_sign)
self._buffer = []
self._after_equal_sign = False
# Closing tag by curly brace
elif sign == "}":
self._text("".join(self._buffer) + before)
self._buffer = []
# If 'test { a = 12 }' occurs, curly brace closes two tags
if self._after_equal_sign:
self._after_equal_sign = False
self._closetag()
self._closetag()
# Closing tag by semicolon
elif sign == ";" and self._after_equal_sign:
self._after_equal_sign = False
self._text(before)
self._closetag()
# Comment line
elif sign == "#":
self._buffer.append(before)
after = ""
# Opening attribute specification
elif sign == "[":
if "".join(self._buffer).strip():
self._error(SYNTAX_ERROR, (self._currline, self._currline))
self._oldbefore = before
self._buffer = []
self._inside_attrib = True
self._opened_tags.append(("[", self._currline, None, None, None))
self._checkstr = _ATTRIB_SPECIALS
# Closing attribute specification
elif sign == "]":
value = "".join(self._buffer) + before
self._attrib = value.strip()
self._inside_attrib = False
self._buffer = []
self._opened_tags.pop()
self._checkstr = _GENERAL_SPECIALS
# Quoting strings
elif sign in ("'", '"'):
if self._inside_quote:
self._checkstr = self._oldcheckstr
self._inside_quote = False
self._buffer.append(before + sign)
self._opened_tags.pop()
else:
self._oldcheckstr = self._checkstr
self._checkstr = sign
self._inside_quote = True
self._buffer.append(before + sign)
self._opened_tags.append(('"', self._currline, None, None, None))
# Interrupt
elif sign == "<" and not self._after_equal_sign:
|
else:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
line = after
def _text(self, text):
stripped = text.strip()
if stripped:
if self._has_child:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
self._eventhandler.add_text(stripped)
self._has_text = True
def _starttag(self, tagname, closeprev):
txt = "".join(self._buffer)
if txt:
self._text(txt)
if self._has_text:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
tagname_stripped = tagname.strip()
if self._oldbefore:
if tagname_stripped:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
else:
tagname_stripped = self._oldbefore.strip()
if len(tagname_stripped.split()) > 1:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
self._hsdattrib[common.HSD_ATTRIB_LINE] = self._currline
self._eventhandler.open_tag(tagname_stripped, self._attrib,
self._hsdattrib)
self._opened_tags.append(
(tagname_stripped, self._currline, closeprev, True, False))
self._has_child = False
self._buffer = []
self._oldbefore = ""
self._attrib = None
self._hsdattrib = {}
def _closetag(self):
if not self._opened_tags:
self._error(SYNTAX_ERROR, (0, self._currline))
self._buffer = []
tag, _, closeprev, self._has_child, self._has_text = self._opened_tags.pop()
self._eventhandler.close_tag(tag)
if closeprev:
self._closetag()
def _include_hsd(self, fname):
fname = common.unquote(fname.strip())
parser = HsdParser(eventhandler=self._eventhandler)
parser.parse(fname)
@staticmethod
def _include_txt(fname):
fname = common.unquote(fname.strip())
with open(fname, "r") as fp:
txt = fp.read()
return txt
def _error(self, errorcode, lines):
error_msg = (
"Parsing error ({}) between lines {} - {} in file '{}'.".format(
errorcode, lines[0] + 1, lines[1] + 1, self._fname))
raise common.HsdError(error_msg)
def _splitbycharset(txt, charset):
"""Splits a string at the first occurrence of a character in a set.
Args:
txt: Text to split.
chars: Chars to look for.
Returns:
Tuple (char, before, after). Char is the character which had been found
(or empty string if nothing was found). Before is the substring before
the splitting character (or the entire string). After is the substring
after the splitting character (or empty string).
"""
for firstpos, char in enumerate(txt):
if char in charset:
return txt[firstpos], txt[:firstpos], txt[firstpos + 1:]
return '', txt, ''
| txtinc = after.startswith("<<")
hsdinc = after.startswith("<+")
if txtinc:
self._text("".join(self._buffer) + before)
self._buffer = []
self._eventhandler.add_text(self._include_txt(after[2:]))
break
if hsdinc:
self._include_hsd(after[2:])
break
self._buffer.append(before + sign) | conditional_block |
parser.py | #--------------------------------------------------------------------------------------------------#
# hsd-python: package for manipulating HSD-formatted data in Python #
# Copyright (C) 2011 - 2023 DFTB+ developers group #
# Licensed under the BSD 2-clause license. #
#--------------------------------------------------------------------------------------------------#
#
"""
Contains the event-generating HSD-parser.
"""
from typing import Optional, TextIO, Union
from hsd import common
from hsd.eventhandler import HsdEventHandler, HsdEventPrinter
SYNTAX_ERROR = 1
UNCLOSED_TAG_ERROR = 2
UNCLOSED_ATTRIB_ERROR = 3
UNCLOSED_QUOTATION_ERROR = 4
ORPHAN_TEXT_ERROR = 5
_GENERAL_SPECIALS = "{}[]<=\"'#;"
_ATTRIB_SPECIALS = "]\"'"
class HsdParser:
"""Event based parser for the HSD format.
Arguments:
eventhandler: Object which should handle the HSD-events triggered
during parsing. When not specified, HsdEventPrinter() is used.
Examples:
>>> from io import StringIO
>>> dictbuilder = hsd.HsdDictBuilder()
>>> parser = hsd.HsdParser(eventhandler=dictbuilder)
>>> hsdfile = StringIO(\"\"\"
... Hamiltonian {
... Dftb {
... Scc = Yes
... Filling = Fermi {
... Temperature [Kelvin] = 100
... }
... }
... }
... \"\"\")
>>> parser.parse(hsdfile)
>>> dictbuilder.hsddict
{'Hamiltonian': {'Dftb': {'Scc': True, 'Filling': {'Fermi':
{'Temperature': 100, 'Temperature.attrib': 'Kelvin'}}}}}
"""
def __init__(self, eventhandler: Optional[HsdEventHandler] = None):
"""Initializes the parser.
Args:
eventhandler: Instance of the HsdEventHandler class or its children.
"""
if eventhandler is None:
self._eventhandler = HsdEventPrinter()
else:
self._eventhandler = eventhandler
self._fname = "" # name of file being processed
self._checkstr = _GENERAL_SPECIALS # special characters to look for
self._oldcheckstr = "" # buffer fo checkstr
self._opened_tags = [] # info about opened tags
self._buffer = [] # buffering plain text between lines
self._attrib = None # attribute for current tag
self._hsdattrib = {} # hsd-options for current tag
self._currline = 0 # nr. of current line in file
self._after_equal_sign = False # last tag was opened with equal sign
self._inside_attrib = False # parser inside attrib specification
self._inside_quote = False # parser inside quotation
self._has_child = True # Whether current node has a child already
self._has_text = False # whether current node contains text already
self._oldbefore = "" # buffer for tagname
def parse(self, fobj: Union[TextIO, str]):
"""Parses the provided file-like object.
The parser will process the data and trigger the corresponding events
in the eventhandler which was passed at initialization.
Args:
fobj: File like object or name of a file containing the data.
"""
isfilename = isinstance(fobj, str)
if isfilename:
fp = open(fobj, "r")
self._fname = fobj
else:
fp = fobj
for line in fp.readlines():
self._parse(line)
self._currline += 1
if isfilename:
fp.close()
# Check for errors
if self._opened_tags:
line0 = self._opened_tags[-1][1]
else:
line0 = 0
if self._inside_quote:
self._error(UNCLOSED_QUOTATION_ERROR, (line0, self._currline))
elif self._inside_attrib:
self._error(UNCLOSED_ATTRIB_ERROR, (line0, self._currline))
elif self._opened_tags:
self._error(UNCLOSED_TAG_ERROR, (line0, line0))
elif ("".join(self._buffer)).strip():
self._error(ORPHAN_TEXT_ERROR, (line0, self._currline))
def _parse(self, line):
"""Parses a given line."""
while True:
sign, before, after = _splitbycharset(line, self._checkstr)
# End of line
if not sign:
if self._inside_quote:
self._buffer.append(before)
elif self._after_equal_sign:
self._text("".join(self._buffer) + before.strip())
self._closetag()
self._after_equal_sign = False
elif not self._inside_attrib:
self._buffer.append(before)
elif before.strip():
self._error(SYNTAX_ERROR, (self._currline, self._currline))
break
# Special character is escaped
elif before.endswith("\\") and not before.endswith("\\\\"):
self._buffer.append(before + sign)
# Equal sign
elif sign == "=":
# Ignore if followed by "{" (DFTB+ compatibility)
if after.lstrip().startswith("{"): # _oldbefore may already contain the tagname, if the # tagname was followed by an attribute -> append
self._oldbefore += before
else:
self._hsdattrib[common.HSD_ATTRIB_EQUAL] = True
self._starttag(before, False)
self._after_equal_sign = True
# Opening tag by curly brace
elif sign == "{":
#self._has_child = True
self._hsdattrib[common.HSD_ATTRIB_EQUAL] = False
self._starttag(before, self._after_equal_sign)
self._buffer = []
self._after_equal_sign = False
# Closing tag by curly brace
elif sign == "}":
self._text("".join(self._buffer) + before)
self._buffer = []
# If 'test { a = 12 }' occurs, curly brace closes two tags
if self._after_equal_sign:
self._after_equal_sign = False
self._closetag()
self._closetag()
# Closing tag by semicolon
elif sign == ";" and self._after_equal_sign:
self._after_equal_sign = False
self._text(before)
self._closetag()
# Comment line
elif sign == "#":
self._buffer.append(before)
after = ""
# Opening attribute specification
elif sign == "[":
if "".join(self._buffer).strip():
self._error(SYNTAX_ERROR, (self._currline, self._currline))
self._oldbefore = before
self._buffer = []
self._inside_attrib = True
self._opened_tags.append(("[", self._currline, None, None, None))
self._checkstr = _ATTRIB_SPECIALS
# Closing attribute specification
elif sign == "]":
value = "".join(self._buffer) + before
self._attrib = value.strip()
self._inside_attrib = False
self._buffer = []
self._opened_tags.pop()
self._checkstr = _GENERAL_SPECIALS
# Quoting strings
elif sign in ("'", '"'):
if self._inside_quote:
self._checkstr = self._oldcheckstr
self._inside_quote = False
self._buffer.append(before + sign)
self._opened_tags.pop()
else:
self._oldcheckstr = self._checkstr
self._checkstr = sign
self._inside_quote = True
self._buffer.append(before + sign)
self._opened_tags.append(('"', self._currline, None, None, None))
# Interrupt
elif sign == "<" and not self._after_equal_sign:
txtinc = after.startswith("<<")
hsdinc = after.startswith("<+")
if txtinc:
self._text("".join(self._buffer) + before)
self._buffer = []
self._eventhandler.add_text(self._include_txt(after[2:]))
break
if hsdinc:
self._include_hsd(after[2:])
break
self._buffer.append(before + sign)
else:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
line = after
def _text(self, text):
stripped = text.strip()
if stripped:
if self._has_child:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
self._eventhandler.add_text(stripped)
self._has_text = True
def _starttag(self, tagname, closeprev):
txt = "".join(self._buffer)
if txt:
self._text(txt)
if self._has_text:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
tagname_stripped = tagname.strip()
if self._oldbefore:
if tagname_stripped:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
else:
tagname_stripped = self._oldbefore.strip()
if len(tagname_stripped.split()) > 1:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
self._hsdattrib[common.HSD_ATTRIB_LINE] = self._currline
self._eventhandler.open_tag(tagname_stripped, self._attrib,
self._hsdattrib)
self._opened_tags.append(
(tagname_stripped, self._currline, closeprev, True, False))
self._has_child = False
self._buffer = []
self._oldbefore = ""
self._attrib = None
self._hsdattrib = {}
def _closetag(self):
if not self._opened_tags:
self._error(SYNTAX_ERROR, (0, self._currline))
self._buffer = []
tag, _, closeprev, self._has_child, self._has_text = self._opened_tags.pop()
self._eventhandler.close_tag(tag)
if closeprev:
self._closetag()
def _include_hsd(self, fname):
|
@staticmethod
def _include_txt(fname):
fname = common.unquote(fname.strip())
with open(fname, "r") as fp:
txt = fp.read()
return txt
def _error(self, errorcode, lines):
error_msg = (
"Parsing error ({}) between lines {} - {} in file '{}'.".format(
errorcode, lines[0] + 1, lines[1] + 1, self._fname))
raise common.HsdError(error_msg)
def _splitbycharset(txt, charset):
"""Splits a string at the first occurrence of a character in a set.
Args:
txt: Text to split.
chars: Chars to look for.
Returns:
Tuple (char, before, after). Char is the character which had been found
(or empty string if nothing was found). Before is the substring before
the splitting character (or the entire string). After is the substring
after the splitting character (or empty string).
"""
for firstpos, char in enumerate(txt):
if char in charset:
return txt[firstpos], txt[:firstpos], txt[firstpos + 1:]
return '', txt, ''
| fname = common.unquote(fname.strip())
parser = HsdParser(eventhandler=self._eventhandler)
parser.parse(fname) | identifier_body |
parser.py | #--------------------------------------------------------------------------------------------------#
# hsd-python: package for manipulating HSD-formatted data in Python #
# Copyright (C) 2011 - 2023 DFTB+ developers group #
# Licensed under the BSD 2-clause license. #
#--------------------------------------------------------------------------------------------------#
#
"""
Contains the event-generating HSD-parser.
"""
from typing import Optional, TextIO, Union
from hsd import common
from hsd.eventhandler import HsdEventHandler, HsdEventPrinter
SYNTAX_ERROR = 1
UNCLOSED_TAG_ERROR = 2
UNCLOSED_ATTRIB_ERROR = 3
UNCLOSED_QUOTATION_ERROR = 4
ORPHAN_TEXT_ERROR = 5
_GENERAL_SPECIALS = "{}[]<=\"'#;"
_ATTRIB_SPECIALS = "]\"'"
class HsdParser:
"""Event based parser for the HSD format.
Arguments:
eventhandler: Object which should handle the HSD-events triggered
during parsing. When not specified, HsdEventPrinter() is used.
Examples:
>>> from io import StringIO
>>> dictbuilder = hsd.HsdDictBuilder()
>>> parser = hsd.HsdParser(eventhandler=dictbuilder)
>>> hsdfile = StringIO(\"\"\"
... Hamiltonian {
... Dftb {
... Scc = Yes
... Filling = Fermi {
... Temperature [Kelvin] = 100
... }
... }
... }
... \"\"\")
>>> parser.parse(hsdfile)
>>> dictbuilder.hsddict
{'Hamiltonian': {'Dftb': {'Scc': True, 'Filling': {'Fermi': |
def __init__(self, eventhandler: Optional[HsdEventHandler] = None):
"""Initializes the parser.
Args:
eventhandler: Instance of the HsdEventHandler class or its children.
"""
if eventhandler is None:
self._eventhandler = HsdEventPrinter()
else:
self._eventhandler = eventhandler
self._fname = "" # name of file being processed
self._checkstr = _GENERAL_SPECIALS # special characters to look for
self._oldcheckstr = "" # buffer fo checkstr
self._opened_tags = [] # info about opened tags
self._buffer = [] # buffering plain text between lines
self._attrib = None # attribute for current tag
self._hsdattrib = {} # hsd-options for current tag
self._currline = 0 # nr. of current line in file
self._after_equal_sign = False # last tag was opened with equal sign
self._inside_attrib = False # parser inside attrib specification
self._inside_quote = False # parser inside quotation
self._has_child = True # Whether current node has a child already
self._has_text = False # whether current node contains text already
self._oldbefore = "" # buffer for tagname
def parse(self, fobj: Union[TextIO, str]):
"""Parses the provided file-like object.
The parser will process the data and trigger the corresponding events
in the eventhandler which was passed at initialization.
Args:
fobj: File like object or name of a file containing the data.
"""
isfilename = isinstance(fobj, str)
if isfilename:
fp = open(fobj, "r")
self._fname = fobj
else:
fp = fobj
for line in fp.readlines():
self._parse(line)
self._currline += 1
if isfilename:
fp.close()
# Check for errors
if self._opened_tags:
line0 = self._opened_tags[-1][1]
else:
line0 = 0
if self._inside_quote:
self._error(UNCLOSED_QUOTATION_ERROR, (line0, self._currline))
elif self._inside_attrib:
self._error(UNCLOSED_ATTRIB_ERROR, (line0, self._currline))
elif self._opened_tags:
self._error(UNCLOSED_TAG_ERROR, (line0, line0))
elif ("".join(self._buffer)).strip():
self._error(ORPHAN_TEXT_ERROR, (line0, self._currline))
def _parse(self, line):
"""Parses a given line."""
while True:
sign, before, after = _splitbycharset(line, self._checkstr)
# End of line
if not sign:
if self._inside_quote:
self._buffer.append(before)
elif self._after_equal_sign:
self._text("".join(self._buffer) + before.strip())
self._closetag()
self._after_equal_sign = False
elif not self._inside_attrib:
self._buffer.append(before)
elif before.strip():
self._error(SYNTAX_ERROR, (self._currline, self._currline))
break
# Special character is escaped
elif before.endswith("\\") and not before.endswith("\\\\"):
self._buffer.append(before + sign)
# Equal sign
elif sign == "=":
# Ignore if followed by "{" (DFTB+ compatibility)
if after.lstrip().startswith("{"): # _oldbefore may already contain the tagname, if the # tagname was followed by an attribute -> append
self._oldbefore += before
else:
self._hsdattrib[common.HSD_ATTRIB_EQUAL] = True
self._starttag(before, False)
self._after_equal_sign = True
# Opening tag by curly brace
elif sign == "{":
#self._has_child = True
self._hsdattrib[common.HSD_ATTRIB_EQUAL] = False
self._starttag(before, self._after_equal_sign)
self._buffer = []
self._after_equal_sign = False
# Closing tag by curly brace
elif sign == "}":
self._text("".join(self._buffer) + before)
self._buffer = []
# If 'test { a = 12 }' occurs, curly brace closes two tags
if self._after_equal_sign:
self._after_equal_sign = False
self._closetag()
self._closetag()
# Closing tag by semicolon
elif sign == ";" and self._after_equal_sign:
self._after_equal_sign = False
self._text(before)
self._closetag()
# Comment line
elif sign == "#":
self._buffer.append(before)
after = ""
# Opening attribute specification
elif sign == "[":
if "".join(self._buffer).strip():
self._error(SYNTAX_ERROR, (self._currline, self._currline))
self._oldbefore = before
self._buffer = []
self._inside_attrib = True
self._opened_tags.append(("[", self._currline, None, None, None))
self._checkstr = _ATTRIB_SPECIALS
# Closing attribute specification
elif sign == "]":
value = "".join(self._buffer) + before
self._attrib = value.strip()
self._inside_attrib = False
self._buffer = []
self._opened_tags.pop()
self._checkstr = _GENERAL_SPECIALS
# Quoting strings
elif sign in ("'", '"'):
if self._inside_quote:
self._checkstr = self._oldcheckstr
self._inside_quote = False
self._buffer.append(before + sign)
self._opened_tags.pop()
else:
self._oldcheckstr = self._checkstr
self._checkstr = sign
self._inside_quote = True
self._buffer.append(before + sign)
self._opened_tags.append(('"', self._currline, None, None, None))
# Interrupt
elif sign == "<" and not self._after_equal_sign:
txtinc = after.startswith("<<")
hsdinc = after.startswith("<+")
if txtinc:
self._text("".join(self._buffer) + before)
self._buffer = []
self._eventhandler.add_text(self._include_txt(after[2:]))
break
if hsdinc:
self._include_hsd(after[2:])
break
self._buffer.append(before + sign)
else:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
line = after
def _text(self, text):
stripped = text.strip()
if stripped:
if self._has_child:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
self._eventhandler.add_text(stripped)
self._has_text = True
def _starttag(self, tagname, closeprev):
txt = "".join(self._buffer)
if txt:
self._text(txt)
if self._has_text:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
tagname_stripped = tagname.strip()
if self._oldbefore:
if tagname_stripped:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
else:
tagname_stripped = self._oldbefore.strip()
if len(tagname_stripped.split()) > 1:
self._error(SYNTAX_ERROR, (self._currline, self._currline))
self._hsdattrib[common.HSD_ATTRIB_LINE] = self._currline
self._eventhandler.open_tag(tagname_stripped, self._attrib,
self._hsdattrib)
self._opened_tags.append(
(tagname_stripped, self._currline, closeprev, True, False))
self._has_child = False
self._buffer = []
self._oldbefore = ""
self._attrib = None
self._hsdattrib = {}
def _closetag(self):
if not self._opened_tags:
self._error(SYNTAX_ERROR, (0, self._currline))
self._buffer = []
tag, _, closeprev, self._has_child, self._has_text = self._opened_tags.pop()
self._eventhandler.close_tag(tag)
if closeprev:
self._closetag()
def _include_hsd(self, fname):
fname = common.unquote(fname.strip())
parser = HsdParser(eventhandler=self._eventhandler)
parser.parse(fname)
@staticmethod
def _include_txt(fname):
fname = common.unquote(fname.strip())
with open(fname, "r") as fp:
txt = fp.read()
return txt
def _error(self, errorcode, lines):
error_msg = (
"Parsing error ({}) between lines {} - {} in file '{}'.".format(
errorcode, lines[0] + 1, lines[1] + 1, self._fname))
raise common.HsdError(error_msg)
def _splitbycharset(txt, charset):
"""Splits a string at the first occurrence of a character in a set.
Args:
txt: Text to split.
chars: Chars to look for.
Returns:
Tuple (char, before, after). Char is the character which had been found
(or empty string if nothing was found). Before is the substring before
the splitting character (or the entire string). After is the substring
after the splitting character (or empty string).
"""
for firstpos, char in enumerate(txt):
if char in charset:
return txt[firstpos], txt[:firstpos], txt[firstpos + 1:]
return '', txt, '' | {'Temperature': 100, 'Temperature.attrib': 'Kelvin'}}}}}
""" | random_line_split |
main.py | import gym
import math
import torch
import torch.nn as nn
import torch.optim as optim
from torch.distributions import Categorical
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from torch.nn.utils import clip_grad_norm_
from collections import deque
env_name = "CartPole-v0"
env = gym.make(env_name)
print("action space: ", env.action_space.n)
print("observation space ", env.env.observation_space.shape[0])
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using: ", device)
GAMMA = 0.99
ETA = 1. # Scaling factor for the influence of the intrinsic Reward
BETA = 0.2
SCALAR_BETA = 0.1
EXTRINSIC_REWARD = False
ENTROPY_BONUS = 0.0001
LAMBDA = 0.95
CLIP_GRAD = .1
C_LR = 4e-4
A_LR = 4e-4
HIDDEN_SIZE = 64
class Critic(nn.Module):
def __init__(self, input_shape):
super(Critic, self).__init__()
self.net = nn.Sequential(nn.Linear(input_shape, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE,HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, 1))
def forward(self,x):
x = self.net(x)
return x
class Actor(nn.Module):
def __init__(self, input_shape, output_shape):
super(Actor, self).__init__()
self.net = nn.Sequential(nn.Linear(input_shape, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE,HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, output_shape),
nn.Softmax(dim=1)
)
def forward(self, x):
probs = self.net(x)
dist = Categorical(probs)
actions = dist.sample()
logprobs = dist.log_prob(actions)
return actions, logprobs, dist
class Encoder(nn.Module):
def __init__(self, state_size, enc_state_size=12, hidden_size=64):
super(Encoder, self).__init__()
self.encoder = nn.Sequential(nn.Linear(state_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, enc_state_size))
def forward(self,x):
return self.encoder(x)
class Inverse(nn.Module):
"""
1. (first submodel) encodes the state and next state into feature space.
2. (second submodel) the inverse approximates the action taken by the given state and next state in feature size
returns the predicted action and the encoded state for the Forward Model and the encoded next state to train the forward model!
optimizing the Inverse model by the loss between actual action taken by the current policy and the predicted action by the inverse model
"""
def __init__(self, action_size, enc_state_size, hidden_size=64):
super(Inverse, self).__init__()
self.inverse = nn.Sequential(nn.Linear(enc_state_size*2, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, action_size),
nn.Softmax(dim=1))
def forward(self, state1,state2):
x = torch.cat( (state1, state2), dim=1)
return self.inverse(x)
class Forward(nn.Module):
def __init__(self, enc_state_size, OHE_size=12, hidden_size=64):
super(Forward, self).__init__()
self.OHE_size = OHE_size
self.forwardM = nn.Sequential(nn.Linear(enc_state_size+self.OHE_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size,enc_state_size))
def forward(self,state,action):
"""
Gets as inputs the aciton taken from the policy and the encoded state by the encoder in the inverse model.
The froward model trys to predict the encoded next state.
Returns the predicted encoded next state.
Gets optimized by the MSE between the actual encoded next state and the predicted version of the forward model!
"""
action_ = torch.zeros(action.shape[0],self.OHE_size) # 2024,OHE
indices = torch.stack( (torch.arange(action.shape[0]), action.squeeze().long()), dim=0)
indices = indices.tolist()
action_[indices] = 1.
x = torch.cat( (state,action_) ,dim=1)
return self.forwardM(x)
def ICM(state1, action, state2, forward_scale=1., inverse_scale=1e4):
"""
"""
state1_hat = encoder(state1)
state2_hat = encoder(state2)
state2_hat_pred = forwardM(state1_hat.detach(), action.detach())
forward_pred_err = forward_scale * forward_loss(state2_hat_pred, state2_hat.detach()).sum(dim=1).unsqueeze(dim=1)
pred_action = inverse(state1_hat, state2_hat)
inverse_pred_err = inverse_scale * inverse_loss(pred_action, action.detach().flatten().long()).unsqueeze(dim=1)
return forward_pred_err, inverse_pred_err
def test_net( count = 10):
rewards = 0.0
steps = 0
entropys = 0.0
for _ in range(count):
obs = env.reset()
while True:
obs_v = torch.from_numpy(obs).unsqueeze(0).float()
action, _, dist = actor(obs_v.to(device))
entropy = dist.entropy().detach().cpu().numpy()
obs, reward, done, info = env.step(action[0].cpu().numpy())
rewards += reward
entropys += entropy.mean()
steps += 1
if done:
break
return rewards/count, entropys/count, steps/count
def compute_gae(next_value, rewards, masks, values, gamma=GAMMA, lambda_=LAMBDA):
"""
lambda => 1: high variance, low bias
lambda => 0: low variance, high bias
"""
values.append(next_value)
gae = 0
disc_returns = []
advantage = []
for step in reversed(range(len(rewards))):
# d = r_t +gamma*V(s_t+1) - V(s)
delta = rewards[step] + gamma * values[step + 1] * masks[step] - values[step]
# sum(lambda*gamma)^t* delta_t+1
gae = delta + gamma * lambda_ * masks[step] * gae
disc_returns.insert(0, gae + values[step]) # adding values since we want the returns and not the advantage yet! A(a,s) = Q"returns" - V(s)
advantage.insert(0, gae)
return torch.FloatTensor(disc_returns).unsqueeze(1), torch.FloatTensor(advantage).unsqueeze(1)
def ppo_iter(mini_batch_size, states, actions, log_probs, advantage, discounted_rewards, curiosity_loss):
batch_size = len(states)
#print(batch_size)
for i in range(batch_size // mini_batch_size):
rand_ids = np.random.randint(0, batch_size, mini_batch_size)
yield torch.cat(states)[rand_ids], torch.cat(actions)[rand_ids], torch.cat(log_probs)[rand_ids], advantage[rand_ids], discounted_rewards[rand_ids], curiosity_loss[rand_ids]
def ppo_update(ppo_epochs, mini_batch_size, states, actions, log_probs, advantage, discounted_rewards, curiosity_loss, eps_clip=0.2):
"""
"""
a_loss_batch = []
c_loss_batch = []
icm_loss_batch = []
for _ in range(ppo_epochs):
for states_i, old_actions, old_logprobs, advantage_i, discounted_reward_i, cur_loss in ppo_iter(mini_batch_size, states, actions, log_probs, advantage, discounted_rewards, curiosity_loss):
optimizer.zero_grad()
#c_optimizer.zero_grad()
#tran critic
new_value = critic(states_i.to(device))
c_loss = .5 * F.mse_loss(new_value, discounted_reward_i)
#clip_grad_norm_(critic.parameters(),CLIP_GRAD)
#c_loss.backward()
#c_optimizer.step()
c_loss_batch.append(c_loss.detach().numpy())
#train actor
#a_optimizer.zero_grad()
_, _, dist = actor(states_i.to(device))
new_logprobs = dist.log_prob(old_actions)
entropy = dist.entropy().mean()
ratio = torch.exp(new_logprobs - old_logprobs.detach())
surr = ratio * advantage_i
clip = torch.clamp(ratio, 1.0 - eps_clip, 1.0 + eps_clip)
a_loss = - (torch.min(surr, clip*advantage_i).mean()) + ENTROPY_BONUS * entropy
#clip_grad_norm_(actor.parameters(),CLIP_GRAD)
#a_loss.backward()
#a_optimizer.step()
a_loss_batch.append(a_loss.detach().numpy())
#train icm
#icm_optimizer.zero_grad()
cur_loss = cur_loss.mean()
#cur_loss.backward(retain_graph=True)
#icm_optimizer.step()
icm_loss_batch.append(cur_loss.detach().numpy())
#when calculated combined loss:
overall_loss = SCALAR_BETA * (c_loss + a_loss) + cur_loss
overall_loss.backward(retain_graph=True)
optimizer.step()
return np.array(c_loss_batch).mean(), np.array(a_loss_batch).mean(), np.array(icm_loss_batch)
torch.manual_seed(42)
torch.cuda.manual_seed(42)
np.random.seed(42)
env.seed(42)
input_shape = env.observation_space.shape[0]
output_shape = env.action_space.n
actor = Actor(input_shape, output_shape).to(device)
critic = Critic(input_shape).to(device)
encoder = Encoder(state_size=input_shape, enc_state_size=2)
inverse = Inverse(action_size=output_shape, enc_state_size=2)
forwardM = Forward(enc_state_size=2, OHE_size=2)
forward_loss = nn.MSELoss(reduction='none')
inverse_loss = nn.CrossEntropyLoss(reduction='none')
all_parameters = list(actor.parameters())+list(critic.parameters())+list(encoder.parameters())+list(inverse.parameters())+list(forwardM.parameters())
optimizer = optim.RMSprop(params=all_parameters, lr=A_LR)
# a_optimizer = optim.RMSprop(params=actor.parameters(), lr=A_LR)
# c_optimizer = optim.RMSprop(params=critic.parameters(), lr=C_LR)
# icm_params = list(encoder.parameters())+list(inverse.parameters())+list(forwardM.parameters())
# icm_optimizer = optim.Adam(params=icm_params, lr = 1e-3)
max_episodes = 550
c_loss_list = []
a_loss_list = []
icm_loss_list = []
entropy_list = []
intrinsic_rewards = []
average_100 = []
plot_rewards = []
max_steps = 2024
for ep in range(max_episodes+1):
state = env.reset()
done = False
state_batch = []
next_state_batch = []
value_batch = []
action_batch = []
logprob_batch = []
rewards_batch = []
masks = []
for step in range(max_steps):
state = torch.from_numpy(state).unsqueeze(0).float()
action, logprob, _ = actor(state.to(device))
value = critic(state.to(device))
next_state, reward, done, _ = env.step(action[0].cpu().numpy())
state_batch.append(state)
next_state_batch.append(torch.from_numpy(next_state).unsqueeze(0).float())
value_batch.append(value.item())
logprob_batch.append(logprob)
action_batch.append(action)
rewards_batch.append(reward)
masks.append(1 - done)
state = next_state
if done:
state = env.reset()
| state2_batch = torch.cat(next_state_batch)
actions_batch = torch.cat(action_batch)
forward_pred_err, inverse_pred_err = ICM(state1_batch, actions_batch, state2_batch)
rewards = ((1. / ETA) * forward_pred_err).detach()
intrinsic_rewards.append(rewards.mean().numpy())
if EXTRINSIC_REWARD == True:
rewards += torch.FloatTensor(rewards_batch).unsqueeze(1)
rewards_batch = list(rewards)
curiosity_loss = (1 - BETA) * inverse_pred_err + (BETA * forward_pred_err)
# calculate advantage:
next_value = critic(torch.from_numpy(next_state).unsqueeze(0).float()).item()
discounted_rewards, advantage = compute_gae(next_value, rewards_batch, masks, value_batch)
# normalize advantage:
advantage = (advantage - advantage.mean()) / (advantage.std() + 1e-5)
c_loss, a_loss, icm_loss = ppo_update(ppo_epochs = 5, mini_batch_size = 512, states = state_batch, actions = action_batch, log_probs = logprob_batch, advantage = advantage, discounted_rewards = discounted_rewards, curiosity_loss=curiosity_loss)
c_loss_list.append(c_loss)
a_loss_list.append(a_loss)
icm_loss_list.append(icm_loss)
if ep != 0 and ep % 10 == 0:
test_rewards, test_entropy, test_steps = test_net()
entropy_list.append(test_entropy)
plot_rewards.append(test_rewards)
average_100.append(np.mean(plot_rewards[-100:]))
print("\rEpisode: {} | Ep_Reward: {:.2f} | Average_100: {:.2f}".format(ep, test_rewards, np.mean(plot_rewards[-100:])), end = "", flush = True)
# PLOTTING RESULTS
plt.figure(figsize = (26,8))
plt.subplot(1,7,1)
plt.title("actor loss")
plt.plot(a_loss_list)
plt.subplot(1,7,2)
plt.title("critic loss")
plt.plot(c_loss_list)
plt.subplot(1,7,3)
plt.title("ICM loss")
plt.plot(icm_loss_list)
plt.subplot(1,7,4)
plt.title("entropy")
plt.plot(entropy_list)
plt.subplot(1,7,5)
plt.title("rewards")
plt.plot(plot_rewards)
plt.subplot(1,7,6)
plt.title("intrinsic rewards")
plt.plot(intrinsic_rewards)
plt.subplot(1,7,7)
plt.title("Average100")
plt.plot(average_100)
plt.show() |
# Intrinsic Curiosity Calculation
state1_batch = torch.cat(state_batch) | random_line_split |
main.py | import gym
import math
import torch
import torch.nn as nn
import torch.optim as optim
from torch.distributions import Categorical
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from torch.nn.utils import clip_grad_norm_
from collections import deque
env_name = "CartPole-v0"
env = gym.make(env_name)
print("action space: ", env.action_space.n)
print("observation space ", env.env.observation_space.shape[0])
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using: ", device)
GAMMA = 0.99
ETA = 1. # Scaling factor for the influence of the intrinsic Reward
BETA = 0.2
SCALAR_BETA = 0.1
EXTRINSIC_REWARD = False
ENTROPY_BONUS = 0.0001
LAMBDA = 0.95
CLIP_GRAD = .1
C_LR = 4e-4
A_LR = 4e-4
HIDDEN_SIZE = 64
class Critic(nn.Module):
def __init__(self, input_shape):
super(Critic, self).__init__()
self.net = nn.Sequential(nn.Linear(input_shape, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE,HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, 1))
def forward(self,x):
x = self.net(x)
return x
class Actor(nn.Module):
def __init__(self, input_shape, output_shape):
super(Actor, self).__init__()
self.net = nn.Sequential(nn.Linear(input_shape, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE,HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, output_shape),
nn.Softmax(dim=1)
)
def forward(self, x):
probs = self.net(x)
dist = Categorical(probs)
actions = dist.sample()
logprobs = dist.log_prob(actions)
return actions, logprobs, dist
class Encoder(nn.Module):
def __init__(self, state_size, enc_state_size=12, hidden_size=64):
super(Encoder, self).__init__()
self.encoder = nn.Sequential(nn.Linear(state_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, enc_state_size))
def forward(self,x):
return self.encoder(x)
class Inverse(nn.Module):
"""
1. (first submodel) encodes the state and next state into feature space.
2. (second submodel) the inverse approximates the action taken by the given state and next state in feature size
returns the predicted action and the encoded state for the Forward Model and the encoded next state to train the forward model!
optimizing the Inverse model by the loss between actual action taken by the current policy and the predicted action by the inverse model
"""
def __init__(self, action_size, enc_state_size, hidden_size=64):
super(Inverse, self).__init__()
self.inverse = nn.Sequential(nn.Linear(enc_state_size*2, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, action_size),
nn.Softmax(dim=1))
def forward(self, state1,state2):
x = torch.cat( (state1, state2), dim=1)
return self.inverse(x)
class Forward(nn.Module):
def __init__(self, enc_state_size, OHE_size=12, hidden_size=64):
super(Forward, self).__init__()
self.OHE_size = OHE_size
self.forwardM = nn.Sequential(nn.Linear(enc_state_size+self.OHE_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size,enc_state_size))
def forward(self,state,action):
"""
Gets as inputs the aciton taken from the policy and the encoded state by the encoder in the inverse model.
The froward model trys to predict the encoded next state.
Returns the predicted encoded next state.
Gets optimized by the MSE between the actual encoded next state and the predicted version of the forward model!
"""
action_ = torch.zeros(action.shape[0],self.OHE_size) # 2024,OHE
indices = torch.stack( (torch.arange(action.shape[0]), action.squeeze().long()), dim=0)
indices = indices.tolist()
action_[indices] = 1.
x = torch.cat( (state,action_) ,dim=1)
return self.forwardM(x)
def | (state1, action, state2, forward_scale=1., inverse_scale=1e4):
"""
"""
state1_hat = encoder(state1)
state2_hat = encoder(state2)
state2_hat_pred = forwardM(state1_hat.detach(), action.detach())
forward_pred_err = forward_scale * forward_loss(state2_hat_pred, state2_hat.detach()).sum(dim=1).unsqueeze(dim=1)
pred_action = inverse(state1_hat, state2_hat)
inverse_pred_err = inverse_scale * inverse_loss(pred_action, action.detach().flatten().long()).unsqueeze(dim=1)
return forward_pred_err, inverse_pred_err
def test_net( count = 10):
rewards = 0.0
steps = 0
entropys = 0.0
for _ in range(count):
obs = env.reset()
while True:
obs_v = torch.from_numpy(obs).unsqueeze(0).float()
action, _, dist = actor(obs_v.to(device))
entropy = dist.entropy().detach().cpu().numpy()
obs, reward, done, info = env.step(action[0].cpu().numpy())
rewards += reward
entropys += entropy.mean()
steps += 1
if done:
break
return rewards/count, entropys/count, steps/count
def compute_gae(next_value, rewards, masks, values, gamma=GAMMA, lambda_=LAMBDA):
"""
lambda => 1: high variance, low bias
lambda => 0: low variance, high bias
"""
values.append(next_value)
gae = 0
disc_returns = []
advantage = []
for step in reversed(range(len(rewards))):
# d = r_t +gamma*V(s_t+1) - V(s)
delta = rewards[step] + gamma * values[step + 1] * masks[step] - values[step]
# sum(lambda*gamma)^t* delta_t+1
gae = delta + gamma * lambda_ * masks[step] * gae
disc_returns.insert(0, gae + values[step]) # adding values since we want the returns and not the advantage yet! A(a,s) = Q"returns" - V(s)
advantage.insert(0, gae)
return torch.FloatTensor(disc_returns).unsqueeze(1), torch.FloatTensor(advantage).unsqueeze(1)
def ppo_iter(mini_batch_size, states, actions, log_probs, advantage, discounted_rewards, curiosity_loss):
batch_size = len(states)
#print(batch_size)
for i in range(batch_size // mini_batch_size):
rand_ids = np.random.randint(0, batch_size, mini_batch_size)
yield torch.cat(states)[rand_ids], torch.cat(actions)[rand_ids], torch.cat(log_probs)[rand_ids], advantage[rand_ids], discounted_rewards[rand_ids], curiosity_loss[rand_ids]
def ppo_update(ppo_epochs, mini_batch_size, states, actions, log_probs, advantage, discounted_rewards, curiosity_loss, eps_clip=0.2):
"""
"""
a_loss_batch = []
c_loss_batch = []
icm_loss_batch = []
for _ in range(ppo_epochs):
for states_i, old_actions, old_logprobs, advantage_i, discounted_reward_i, cur_loss in ppo_iter(mini_batch_size, states, actions, log_probs, advantage, discounted_rewards, curiosity_loss):
optimizer.zero_grad()
#c_optimizer.zero_grad()
#tran critic
new_value = critic(states_i.to(device))
c_loss = .5 * F.mse_loss(new_value, discounted_reward_i)
#clip_grad_norm_(critic.parameters(),CLIP_GRAD)
#c_loss.backward()
#c_optimizer.step()
c_loss_batch.append(c_loss.detach().numpy())
#train actor
#a_optimizer.zero_grad()
_, _, dist = actor(states_i.to(device))
new_logprobs = dist.log_prob(old_actions)
entropy = dist.entropy().mean()
ratio = torch.exp(new_logprobs - old_logprobs.detach())
surr = ratio * advantage_i
clip = torch.clamp(ratio, 1.0 - eps_clip, 1.0 + eps_clip)
a_loss = - (torch.min(surr, clip*advantage_i).mean()) + ENTROPY_BONUS * entropy
#clip_grad_norm_(actor.parameters(),CLIP_GRAD)
#a_loss.backward()
#a_optimizer.step()
a_loss_batch.append(a_loss.detach().numpy())
#train icm
#icm_optimizer.zero_grad()
cur_loss = cur_loss.mean()
#cur_loss.backward(retain_graph=True)
#icm_optimizer.step()
icm_loss_batch.append(cur_loss.detach().numpy())
#when calculated combined loss:
overall_loss = SCALAR_BETA * (c_loss + a_loss) + cur_loss
overall_loss.backward(retain_graph=True)
optimizer.step()
return np.array(c_loss_batch).mean(), np.array(a_loss_batch).mean(), np.array(icm_loss_batch)
torch.manual_seed(42)
torch.cuda.manual_seed(42)
np.random.seed(42)
env.seed(42)
input_shape = env.observation_space.shape[0]
output_shape = env.action_space.n
actor = Actor(input_shape, output_shape).to(device)
critic = Critic(input_shape).to(device)
encoder = Encoder(state_size=input_shape, enc_state_size=2)
inverse = Inverse(action_size=output_shape, enc_state_size=2)
forwardM = Forward(enc_state_size=2, OHE_size=2)
forward_loss = nn.MSELoss(reduction='none')
inverse_loss = nn.CrossEntropyLoss(reduction='none')
all_parameters = list(actor.parameters())+list(critic.parameters())+list(encoder.parameters())+list(inverse.parameters())+list(forwardM.parameters())
optimizer = optim.RMSprop(params=all_parameters, lr=A_LR)
# a_optimizer = optim.RMSprop(params=actor.parameters(), lr=A_LR)
# c_optimizer = optim.RMSprop(params=critic.parameters(), lr=C_LR)
# icm_params = list(encoder.parameters())+list(inverse.parameters())+list(forwardM.parameters())
# icm_optimizer = optim.Adam(params=icm_params, lr = 1e-3)
max_episodes = 550
c_loss_list = []
a_loss_list = []
icm_loss_list = []
entropy_list = []
intrinsic_rewards = []
average_100 = []
plot_rewards = []
max_steps = 2024
for ep in range(max_episodes+1):
state = env.reset()
done = False
state_batch = []
next_state_batch = []
value_batch = []
action_batch = []
logprob_batch = []
rewards_batch = []
masks = []
for step in range(max_steps):
state = torch.from_numpy(state).unsqueeze(0).float()
action, logprob, _ = actor(state.to(device))
value = critic(state.to(device))
next_state, reward, done, _ = env.step(action[0].cpu().numpy())
state_batch.append(state)
next_state_batch.append(torch.from_numpy(next_state).unsqueeze(0).float())
value_batch.append(value.item())
logprob_batch.append(logprob)
action_batch.append(action)
rewards_batch.append(reward)
masks.append(1 - done)
state = next_state
if done:
state = env.reset()
# Intrinsic Curiosity Calculation
state1_batch = torch.cat(state_batch)
state2_batch = torch.cat(next_state_batch)
actions_batch = torch.cat(action_batch)
forward_pred_err, inverse_pred_err = ICM(state1_batch, actions_batch, state2_batch)
rewards = ((1. / ETA) * forward_pred_err).detach()
intrinsic_rewards.append(rewards.mean().numpy())
if EXTRINSIC_REWARD == True:
rewards += torch.FloatTensor(rewards_batch).unsqueeze(1)
rewards_batch = list(rewards)
curiosity_loss = (1 - BETA) * inverse_pred_err + (BETA * forward_pred_err)
# calculate advantage:
next_value = critic(torch.from_numpy(next_state).unsqueeze(0).float()).item()
discounted_rewards, advantage = compute_gae(next_value, rewards_batch, masks, value_batch)
# normalize advantage:
advantage = (advantage - advantage.mean()) / (advantage.std() + 1e-5)
c_loss, a_loss, icm_loss = ppo_update(ppo_epochs = 5, mini_batch_size = 512, states = state_batch, actions = action_batch, log_probs = logprob_batch, advantage = advantage, discounted_rewards = discounted_rewards, curiosity_loss=curiosity_loss)
c_loss_list.append(c_loss)
a_loss_list.append(a_loss)
icm_loss_list.append(icm_loss)
if ep != 0 and ep % 10 == 0:
test_rewards, test_entropy, test_steps = test_net()
entropy_list.append(test_entropy)
plot_rewards.append(test_rewards)
average_100.append(np.mean(plot_rewards[-100:]))
print("\rEpisode: {} | Ep_Reward: {:.2f} | Average_100: {:.2f}".format(ep, test_rewards, np.mean(plot_rewards[-100:])), end = "", flush = True)
# PLOTTING RESULTS
plt.figure(figsize = (26,8))
plt.subplot(1,7,1)
plt.title("actor loss")
plt.plot(a_loss_list)
plt.subplot(1,7,2)
plt.title("critic loss")
plt.plot(c_loss_list)
plt.subplot(1,7,3)
plt.title("ICM loss")
plt.plot(icm_loss_list)
plt.subplot(1,7,4)
plt.title("entropy")
plt.plot(entropy_list)
plt.subplot(1,7,5)
plt.title("rewards")
plt.plot(plot_rewards)
plt.subplot(1,7,6)
plt.title("intrinsic rewards")
plt.plot(intrinsic_rewards)
plt.subplot(1,7,7)
plt.title("Average100")
plt.plot(average_100)
plt.show()
| ICM | identifier_name |
main.py | import gym
import math
import torch
import torch.nn as nn
import torch.optim as optim
from torch.distributions import Categorical
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from torch.nn.utils import clip_grad_norm_
from collections import deque
env_name = "CartPole-v0"
env = gym.make(env_name)
print("action space: ", env.action_space.n)
print("observation space ", env.env.observation_space.shape[0])
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using: ", device)
GAMMA = 0.99
ETA = 1. # Scaling factor for the influence of the intrinsic Reward
BETA = 0.2
SCALAR_BETA = 0.1
EXTRINSIC_REWARD = False
ENTROPY_BONUS = 0.0001
LAMBDA = 0.95
CLIP_GRAD = .1
C_LR = 4e-4
A_LR = 4e-4
HIDDEN_SIZE = 64
class Critic(nn.Module):
def __init__(self, input_shape):
super(Critic, self).__init__()
self.net = nn.Sequential(nn.Linear(input_shape, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE,HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, 1))
def forward(self,x):
x = self.net(x)
return x
class Actor(nn.Module):
def __init__(self, input_shape, output_shape):
super(Actor, self).__init__()
self.net = nn.Sequential(nn.Linear(input_shape, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE,HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, output_shape),
nn.Softmax(dim=1)
)
def forward(self, x):
probs = self.net(x)
dist = Categorical(probs)
actions = dist.sample()
logprobs = dist.log_prob(actions)
return actions, logprobs, dist
class Encoder(nn.Module):
def __init__(self, state_size, enc_state_size=12, hidden_size=64):
super(Encoder, self).__init__()
self.encoder = nn.Sequential(nn.Linear(state_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, enc_state_size))
def forward(self,x):
return self.encoder(x)
class Inverse(nn.Module):
"""
1. (first submodel) encodes the state and next state into feature space.
2. (second submodel) the inverse approximates the action taken by the given state and next state in feature size
returns the predicted action and the encoded state for the Forward Model and the encoded next state to train the forward model!
optimizing the Inverse model by the loss between actual action taken by the current policy and the predicted action by the inverse model
"""
def __init__(self, action_size, enc_state_size, hidden_size=64):
super(Inverse, self).__init__()
self.inverse = nn.Sequential(nn.Linear(enc_state_size*2, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, action_size),
nn.Softmax(dim=1))
def forward(self, state1,state2):
x = torch.cat( (state1, state2), dim=1)
return self.inverse(x)
class Forward(nn.Module):
def __init__(self, enc_state_size, OHE_size=12, hidden_size=64):
super(Forward, self).__init__()
self.OHE_size = OHE_size
self.forwardM = nn.Sequential(nn.Linear(enc_state_size+self.OHE_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size,enc_state_size))
def forward(self,state,action):
"""
Gets as inputs the aciton taken from the policy and the encoded state by the encoder in the inverse model.
The froward model trys to predict the encoded next state.
Returns the predicted encoded next state.
Gets optimized by the MSE between the actual encoded next state and the predicted version of the forward model!
"""
action_ = torch.zeros(action.shape[0],self.OHE_size) # 2024,OHE
indices = torch.stack( (torch.arange(action.shape[0]), action.squeeze().long()), dim=0)
indices = indices.tolist()
action_[indices] = 1.
x = torch.cat( (state,action_) ,dim=1)
return self.forwardM(x)
def ICM(state1, action, state2, forward_scale=1., inverse_scale=1e4):
"""
"""
state1_hat = encoder(state1)
state2_hat = encoder(state2)
state2_hat_pred = forwardM(state1_hat.detach(), action.detach())
forward_pred_err = forward_scale * forward_loss(state2_hat_pred, state2_hat.detach()).sum(dim=1).unsqueeze(dim=1)
pred_action = inverse(state1_hat, state2_hat)
inverse_pred_err = inverse_scale * inverse_loss(pred_action, action.detach().flatten().long()).unsqueeze(dim=1)
return forward_pred_err, inverse_pred_err
def test_net( count = 10):
rewards = 0.0
steps = 0
entropys = 0.0
for _ in range(count):
obs = env.reset()
while True:
obs_v = torch.from_numpy(obs).unsqueeze(0).float()
action, _, dist = actor(obs_v.to(device))
entropy = dist.entropy().detach().cpu().numpy()
obs, reward, done, info = env.step(action[0].cpu().numpy())
rewards += reward
entropys += entropy.mean()
steps += 1
if done:
break
return rewards/count, entropys/count, steps/count
def compute_gae(next_value, rewards, masks, values, gamma=GAMMA, lambda_=LAMBDA):
"""
lambda => 1: high variance, low bias
lambda => 0: low variance, high bias
"""
values.append(next_value)
gae = 0
disc_returns = []
advantage = []
for step in reversed(range(len(rewards))):
# d = r_t +gamma*V(s_t+1) - V(s)
delta = rewards[step] + gamma * values[step + 1] * masks[step] - values[step]
# sum(lambda*gamma)^t* delta_t+1
gae = delta + gamma * lambda_ * masks[step] * gae
disc_returns.insert(0, gae + values[step]) # adding values since we want the returns and not the advantage yet! A(a,s) = Q"returns" - V(s)
advantage.insert(0, gae)
return torch.FloatTensor(disc_returns).unsqueeze(1), torch.FloatTensor(advantage).unsqueeze(1)
def ppo_iter(mini_batch_size, states, actions, log_probs, advantage, discounted_rewards, curiosity_loss):
batch_size = len(states)
#print(batch_size)
for i in range(batch_size // mini_batch_size):
rand_ids = np.random.randint(0, batch_size, mini_batch_size)
yield torch.cat(states)[rand_ids], torch.cat(actions)[rand_ids], torch.cat(log_probs)[rand_ids], advantage[rand_ids], discounted_rewards[rand_ids], curiosity_loss[rand_ids]
def ppo_update(ppo_epochs, mini_batch_size, states, actions, log_probs, advantage, discounted_rewards, curiosity_loss, eps_clip=0.2):
"""
"""
a_loss_batch = []
c_loss_batch = []
icm_loss_batch = []
for _ in range(ppo_epochs):
|
return np.array(c_loss_batch).mean(), np.array(a_loss_batch).mean(), np.array(icm_loss_batch)
torch.manual_seed(42)
torch.cuda.manual_seed(42)
np.random.seed(42)
env.seed(42)
input_shape = env.observation_space.shape[0]
output_shape = env.action_space.n
actor = Actor(input_shape, output_shape).to(device)
critic = Critic(input_shape).to(device)
encoder = Encoder(state_size=input_shape, enc_state_size=2)
inverse = Inverse(action_size=output_shape, enc_state_size=2)
forwardM = Forward(enc_state_size=2, OHE_size=2)
forward_loss = nn.MSELoss(reduction='none')
inverse_loss = nn.CrossEntropyLoss(reduction='none')
all_parameters = list(actor.parameters())+list(critic.parameters())+list(encoder.parameters())+list(inverse.parameters())+list(forwardM.parameters())
optimizer = optim.RMSprop(params=all_parameters, lr=A_LR)
# a_optimizer = optim.RMSprop(params=actor.parameters(), lr=A_LR)
# c_optimizer = optim.RMSprop(params=critic.parameters(), lr=C_LR)
# icm_params = list(encoder.parameters())+list(inverse.parameters())+list(forwardM.parameters())
# icm_optimizer = optim.Adam(params=icm_params, lr = 1e-3)
max_episodes = 550
c_loss_list = []
a_loss_list = []
icm_loss_list = []
entropy_list = []
intrinsic_rewards = []
average_100 = []
plot_rewards = []
max_steps = 2024
for ep in range(max_episodes+1):
state = env.reset()
done = False
state_batch = []
next_state_batch = []
value_batch = []
action_batch = []
logprob_batch = []
rewards_batch = []
masks = []
for step in range(max_steps):
state = torch.from_numpy(state).unsqueeze(0).float()
action, logprob, _ = actor(state.to(device))
value = critic(state.to(device))
next_state, reward, done, _ = env.step(action[0].cpu().numpy())
state_batch.append(state)
next_state_batch.append(torch.from_numpy(next_state).unsqueeze(0).float())
value_batch.append(value.item())
logprob_batch.append(logprob)
action_batch.append(action)
rewards_batch.append(reward)
masks.append(1 - done)
state = next_state
if done:
state = env.reset()
# Intrinsic Curiosity Calculation
state1_batch = torch.cat(state_batch)
state2_batch = torch.cat(next_state_batch)
actions_batch = torch.cat(action_batch)
forward_pred_err, inverse_pred_err = ICM(state1_batch, actions_batch, state2_batch)
rewards = ((1. / ETA) * forward_pred_err).detach()
intrinsic_rewards.append(rewards.mean().numpy())
if EXTRINSIC_REWARD == True:
rewards += torch.FloatTensor(rewards_batch).unsqueeze(1)
rewards_batch = list(rewards)
curiosity_loss = (1 - BETA) * inverse_pred_err + (BETA * forward_pred_err)
# calculate advantage:
next_value = critic(torch.from_numpy(next_state).unsqueeze(0).float()).item()
discounted_rewards, advantage = compute_gae(next_value, rewards_batch, masks, value_batch)
# normalize advantage:
advantage = (advantage - advantage.mean()) / (advantage.std() + 1e-5)
c_loss, a_loss, icm_loss = ppo_update(ppo_epochs = 5, mini_batch_size = 512, states = state_batch, actions = action_batch, log_probs = logprob_batch, advantage = advantage, discounted_rewards = discounted_rewards, curiosity_loss=curiosity_loss)
c_loss_list.append(c_loss)
a_loss_list.append(a_loss)
icm_loss_list.append(icm_loss)
if ep != 0 and ep % 10 == 0:
test_rewards, test_entropy, test_steps = test_net()
entropy_list.append(test_entropy)
plot_rewards.append(test_rewards)
average_100.append(np.mean(plot_rewards[-100:]))
print("\rEpisode: {} | Ep_Reward: {:.2f} | Average_100: {:.2f}".format(ep, test_rewards, np.mean(plot_rewards[-100:])), end = "", flush = True)
# PLOTTING RESULTS
plt.figure(figsize = (26,8))
plt.subplot(1,7,1)
plt.title("actor loss")
plt.plot(a_loss_list)
plt.subplot(1,7,2)
plt.title("critic loss")
plt.plot(c_loss_list)
plt.subplot(1,7,3)
plt.title("ICM loss")
plt.plot(icm_loss_list)
plt.subplot(1,7,4)
plt.title("entropy")
plt.plot(entropy_list)
plt.subplot(1,7,5)
plt.title("rewards")
plt.plot(plot_rewards)
plt.subplot(1,7,6)
plt.title("intrinsic rewards")
plt.plot(intrinsic_rewards)
plt.subplot(1,7,7)
plt.title("Average100")
plt.plot(average_100)
plt.show()
| for states_i, old_actions, old_logprobs, advantage_i, discounted_reward_i, cur_loss in ppo_iter(mini_batch_size, states, actions, log_probs, advantage, discounted_rewards, curiosity_loss):
optimizer.zero_grad()
#c_optimizer.zero_grad()
#tran critic
new_value = critic(states_i.to(device))
c_loss = .5 * F.mse_loss(new_value, discounted_reward_i)
#clip_grad_norm_(critic.parameters(),CLIP_GRAD)
#c_loss.backward()
#c_optimizer.step()
c_loss_batch.append(c_loss.detach().numpy())
#train actor
#a_optimizer.zero_grad()
_, _, dist = actor(states_i.to(device))
new_logprobs = dist.log_prob(old_actions)
entropy = dist.entropy().mean()
ratio = torch.exp(new_logprobs - old_logprobs.detach())
surr = ratio * advantage_i
clip = torch.clamp(ratio, 1.0 - eps_clip, 1.0 + eps_clip)
a_loss = - (torch.min(surr, clip*advantage_i).mean()) + ENTROPY_BONUS * entropy
#clip_grad_norm_(actor.parameters(),CLIP_GRAD)
#a_loss.backward()
#a_optimizer.step()
a_loss_batch.append(a_loss.detach().numpy())
#train icm
#icm_optimizer.zero_grad()
cur_loss = cur_loss.mean()
#cur_loss.backward(retain_graph=True)
#icm_optimizer.step()
icm_loss_batch.append(cur_loss.detach().numpy())
#when calculated combined loss:
overall_loss = SCALAR_BETA * (c_loss + a_loss) + cur_loss
overall_loss.backward(retain_graph=True)
optimizer.step() | conditional_block |
main.py | import gym
import math
import torch
import torch.nn as nn
import torch.optim as optim
from torch.distributions import Categorical
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from torch.nn.utils import clip_grad_norm_
from collections import deque
env_name = "CartPole-v0"
env = gym.make(env_name)
print("action space: ", env.action_space.n)
print("observation space ", env.env.observation_space.shape[0])
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using: ", device)
GAMMA = 0.99
ETA = 1. # Scaling factor for the influence of the intrinsic Reward
BETA = 0.2
SCALAR_BETA = 0.1
EXTRINSIC_REWARD = False
ENTROPY_BONUS = 0.0001
LAMBDA = 0.95
CLIP_GRAD = .1
C_LR = 4e-4
A_LR = 4e-4
HIDDEN_SIZE = 64
class Critic(nn.Module):
def __init__(self, input_shape):
super(Critic, self).__init__()
self.net = nn.Sequential(nn.Linear(input_shape, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE,HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, 1))
def forward(self,x):
x = self.net(x)
return x
class Actor(nn.Module):
def __init__(self, input_shape, output_shape):
super(Actor, self).__init__()
self.net = nn.Sequential(nn.Linear(input_shape, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE,HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, output_shape),
nn.Softmax(dim=1)
)
def forward(self, x):
probs = self.net(x)
dist = Categorical(probs)
actions = dist.sample()
logprobs = dist.log_prob(actions)
return actions, logprobs, dist
class Encoder(nn.Module):
def __init__(self, state_size, enc_state_size=12, hidden_size=64):
super(Encoder, self).__init__()
self.encoder = nn.Sequential(nn.Linear(state_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, enc_state_size))
def forward(self,x):
return self.encoder(x)
class Inverse(nn.Module):
"""
1. (first submodel) encodes the state and next state into feature space.
2. (second submodel) the inverse approximates the action taken by the given state and next state in feature size
returns the predicted action and the encoded state for the Forward Model and the encoded next state to train the forward model!
optimizing the Inverse model by the loss between actual action taken by the current policy and the predicted action by the inverse model
"""
def __init__(self, action_size, enc_state_size, hidden_size=64):
super(Inverse, self).__init__()
self.inverse = nn.Sequential(nn.Linear(enc_state_size*2, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, action_size),
nn.Softmax(dim=1))
def forward(self, state1,state2):
x = torch.cat( (state1, state2), dim=1)
return self.inverse(x)
class Forward(nn.Module):
def __init__(self, enc_state_size, OHE_size=12, hidden_size=64):
super(Forward, self).__init__()
self.OHE_size = OHE_size
self.forwardM = nn.Sequential(nn.Linear(enc_state_size+self.OHE_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size,enc_state_size))
def forward(self,state,action):
|
def ICM(state1, action, state2, forward_scale=1., inverse_scale=1e4):
"""
"""
state1_hat = encoder(state1)
state2_hat = encoder(state2)
state2_hat_pred = forwardM(state1_hat.detach(), action.detach())
forward_pred_err = forward_scale * forward_loss(state2_hat_pred, state2_hat.detach()).sum(dim=1).unsqueeze(dim=1)
pred_action = inverse(state1_hat, state2_hat)
inverse_pred_err = inverse_scale * inverse_loss(pred_action, action.detach().flatten().long()).unsqueeze(dim=1)
return forward_pred_err, inverse_pred_err
def test_net( count = 10):
rewards = 0.0
steps = 0
entropys = 0.0
for _ in range(count):
obs = env.reset()
while True:
obs_v = torch.from_numpy(obs).unsqueeze(0).float()
action, _, dist = actor(obs_v.to(device))
entropy = dist.entropy().detach().cpu().numpy()
obs, reward, done, info = env.step(action[0].cpu().numpy())
rewards += reward
entropys += entropy.mean()
steps += 1
if done:
break
return rewards/count, entropys/count, steps/count
def compute_gae(next_value, rewards, masks, values, gamma=GAMMA, lambda_=LAMBDA):
"""
lambda => 1: high variance, low bias
lambda => 0: low variance, high bias
"""
values.append(next_value)
gae = 0
disc_returns = []
advantage = []
for step in reversed(range(len(rewards))):
# d = r_t +gamma*V(s_t+1) - V(s)
delta = rewards[step] + gamma * values[step + 1] * masks[step] - values[step]
# sum(lambda*gamma)^t* delta_t+1
gae = delta + gamma * lambda_ * masks[step] * gae
disc_returns.insert(0, gae + values[step]) # adding values since we want the returns and not the advantage yet! A(a,s) = Q"returns" - V(s)
advantage.insert(0, gae)
return torch.FloatTensor(disc_returns).unsqueeze(1), torch.FloatTensor(advantage).unsqueeze(1)
def ppo_iter(mini_batch_size, states, actions, log_probs, advantage, discounted_rewards, curiosity_loss):
batch_size = len(states)
#print(batch_size)
for i in range(batch_size // mini_batch_size):
rand_ids = np.random.randint(0, batch_size, mini_batch_size)
yield torch.cat(states)[rand_ids], torch.cat(actions)[rand_ids], torch.cat(log_probs)[rand_ids], advantage[rand_ids], discounted_rewards[rand_ids], curiosity_loss[rand_ids]
def ppo_update(ppo_epochs, mini_batch_size, states, actions, log_probs, advantage, discounted_rewards, curiosity_loss, eps_clip=0.2):
"""
"""
a_loss_batch = []
c_loss_batch = []
icm_loss_batch = []
for _ in range(ppo_epochs):
for states_i, old_actions, old_logprobs, advantage_i, discounted_reward_i, cur_loss in ppo_iter(mini_batch_size, states, actions, log_probs, advantage, discounted_rewards, curiosity_loss):
optimizer.zero_grad()
#c_optimizer.zero_grad()
#tran critic
new_value = critic(states_i.to(device))
c_loss = .5 * F.mse_loss(new_value, discounted_reward_i)
#clip_grad_norm_(critic.parameters(),CLIP_GRAD)
#c_loss.backward()
#c_optimizer.step()
c_loss_batch.append(c_loss.detach().numpy())
#train actor
#a_optimizer.zero_grad()
_, _, dist = actor(states_i.to(device))
new_logprobs = dist.log_prob(old_actions)
entropy = dist.entropy().mean()
ratio = torch.exp(new_logprobs - old_logprobs.detach())
surr = ratio * advantage_i
clip = torch.clamp(ratio, 1.0 - eps_clip, 1.0 + eps_clip)
a_loss = - (torch.min(surr, clip*advantage_i).mean()) + ENTROPY_BONUS * entropy
#clip_grad_norm_(actor.parameters(),CLIP_GRAD)
#a_loss.backward()
#a_optimizer.step()
a_loss_batch.append(a_loss.detach().numpy())
#train icm
#icm_optimizer.zero_grad()
cur_loss = cur_loss.mean()
#cur_loss.backward(retain_graph=True)
#icm_optimizer.step()
icm_loss_batch.append(cur_loss.detach().numpy())
#when calculated combined loss:
overall_loss = SCALAR_BETA * (c_loss + a_loss) + cur_loss
overall_loss.backward(retain_graph=True)
optimizer.step()
return np.array(c_loss_batch).mean(), np.array(a_loss_batch).mean(), np.array(icm_loss_batch)
torch.manual_seed(42)
torch.cuda.manual_seed(42)
np.random.seed(42)
env.seed(42)
input_shape = env.observation_space.shape[0]
output_shape = env.action_space.n
actor = Actor(input_shape, output_shape).to(device)
critic = Critic(input_shape).to(device)
encoder = Encoder(state_size=input_shape, enc_state_size=2)
inverse = Inverse(action_size=output_shape, enc_state_size=2)
forwardM = Forward(enc_state_size=2, OHE_size=2)
forward_loss = nn.MSELoss(reduction='none')
inverse_loss = nn.CrossEntropyLoss(reduction='none')
all_parameters = list(actor.parameters())+list(critic.parameters())+list(encoder.parameters())+list(inverse.parameters())+list(forwardM.parameters())
optimizer = optim.RMSprop(params=all_parameters, lr=A_LR)
# a_optimizer = optim.RMSprop(params=actor.parameters(), lr=A_LR)
# c_optimizer = optim.RMSprop(params=critic.parameters(), lr=C_LR)
# icm_params = list(encoder.parameters())+list(inverse.parameters())+list(forwardM.parameters())
# icm_optimizer = optim.Adam(params=icm_params, lr = 1e-3)
max_episodes = 550
c_loss_list = []
a_loss_list = []
icm_loss_list = []
entropy_list = []
intrinsic_rewards = []
average_100 = []
plot_rewards = []
max_steps = 2024
for ep in range(max_episodes+1):
state = env.reset()
done = False
state_batch = []
next_state_batch = []
value_batch = []
action_batch = []
logprob_batch = []
rewards_batch = []
masks = []
for step in range(max_steps):
state = torch.from_numpy(state).unsqueeze(0).float()
action, logprob, _ = actor(state.to(device))
value = critic(state.to(device))
next_state, reward, done, _ = env.step(action[0].cpu().numpy())
state_batch.append(state)
next_state_batch.append(torch.from_numpy(next_state).unsqueeze(0).float())
value_batch.append(value.item())
logprob_batch.append(logprob)
action_batch.append(action)
rewards_batch.append(reward)
masks.append(1 - done)
state = next_state
if done:
state = env.reset()
# Intrinsic Curiosity Calculation
state1_batch = torch.cat(state_batch)
state2_batch = torch.cat(next_state_batch)
actions_batch = torch.cat(action_batch)
forward_pred_err, inverse_pred_err = ICM(state1_batch, actions_batch, state2_batch)
rewards = ((1. / ETA) * forward_pred_err).detach()
intrinsic_rewards.append(rewards.mean().numpy())
if EXTRINSIC_REWARD == True:
rewards += torch.FloatTensor(rewards_batch).unsqueeze(1)
rewards_batch = list(rewards)
curiosity_loss = (1 - BETA) * inverse_pred_err + (BETA * forward_pred_err)
# calculate advantage:
next_value = critic(torch.from_numpy(next_state).unsqueeze(0).float()).item()
discounted_rewards, advantage = compute_gae(next_value, rewards_batch, masks, value_batch)
# normalize advantage:
advantage = (advantage - advantage.mean()) / (advantage.std() + 1e-5)
c_loss, a_loss, icm_loss = ppo_update(ppo_epochs = 5, mini_batch_size = 512, states = state_batch, actions = action_batch, log_probs = logprob_batch, advantage = advantage, discounted_rewards = discounted_rewards, curiosity_loss=curiosity_loss)
c_loss_list.append(c_loss)
a_loss_list.append(a_loss)
icm_loss_list.append(icm_loss)
if ep != 0 and ep % 10 == 0:
test_rewards, test_entropy, test_steps = test_net()
entropy_list.append(test_entropy)
plot_rewards.append(test_rewards)
average_100.append(np.mean(plot_rewards[-100:]))
print("\rEpisode: {} | Ep_Reward: {:.2f} | Average_100: {:.2f}".format(ep, test_rewards, np.mean(plot_rewards[-100:])), end = "", flush = True)
# PLOTTING RESULTS
plt.figure(figsize = (26,8))
plt.subplot(1,7,1)
plt.title("actor loss")
plt.plot(a_loss_list)
plt.subplot(1,7,2)
plt.title("critic loss")
plt.plot(c_loss_list)
plt.subplot(1,7,3)
plt.title("ICM loss")
plt.plot(icm_loss_list)
plt.subplot(1,7,4)
plt.title("entropy")
plt.plot(entropy_list)
plt.subplot(1,7,5)
plt.title("rewards")
plt.plot(plot_rewards)
plt.subplot(1,7,6)
plt.title("intrinsic rewards")
plt.plot(intrinsic_rewards)
plt.subplot(1,7,7)
plt.title("Average100")
plt.plot(average_100)
plt.show()
| """
Gets as inputs the aciton taken from the policy and the encoded state by the encoder in the inverse model.
The froward model trys to predict the encoded next state.
Returns the predicted encoded next state.
Gets optimized by the MSE between the actual encoded next state and the predicted version of the forward model!
"""
action_ = torch.zeros(action.shape[0],self.OHE_size) # 2024,OHE
indices = torch.stack( (torch.arange(action.shape[0]), action.squeeze().long()), dim=0)
indices = indices.tolist()
action_[indices] = 1.
x = torch.cat( (state,action_) ,dim=1)
return self.forwardM(x) | identifier_body |
TimeSlotGroup.js | import PropTypes from 'prop-types'
import React, { Component } from 'react'
import cn from 'classnames'
import TimeSlot from './TimeSlot'
import date from './utils/dates.js'
import localizer from './header/localizer'
import { elementType, dateFormat } from './utils/propTypes'
import EventSlot from './EventSlot';
import { DropTarget } from 'react-dnd';
import MasterListSlot from './MasterListSlot';
import moment from 'moment';
import Modal from '../../Modal/index';
import Button from "../../Button";
const squareTarget = {
drop(props) {
props.transferTraining(props.value); // drag and drop
console.log('DROP props :', props);
//moveKnight(props.x, props.y);
},
// hover(props, monitor, component) {
// // This is fired very often and lets you perform side effects
// // in response to the hover. You can't handle enter and leave
// // here—if you need them, put monitor.isOver() into collect() so you
// // can just use componentDidUpdate() to handle enter/leave.
// }
};
function collect(connect, monitor){
return{
connectDropTarget: connect.dropTarget(),
hovered: monitor.isOver(),
item: monitor.getItem(),
}
}
class TimeSlotGroup extends Component {
static propTypes = {
dayWrapperComponent: elementType,
timeslots: PropTypes.number.isRequired,
step: PropTypes.number.isRequired,
value: PropTypes.instanceOf(Date).isRequired,
showLabels: PropTypes.bool,
isNow: PropTypes.bool,
slotPropGetter: PropTypes.func,
timeGutterFormat: dateFormat,
culture: PropTypes.string,
resource: PropTypes.string,
}
static defaultProps = {
intervals: [],
timeslots: 2,
step: 30,
isNow: false,
showLabels: false,
freeTrainers: null,
}
constructor(props) {
super(props);
this.state = {
modalWasTransfer: false,
modalTooLateTransfer: false,
}
};
showWasTransferModal = () => {
this.setState({modalWasTransfer: true});
}
showTooLateTransferModal = () => {
this.setState({modalTooLateTransfer: true});
}
renderSlice(slotNumber, content, value) {
const {
dayWrapperComponent,
showLabels,
isNow,
culture,
resource,
slotPropGetter,
showTransferEvent, //my
} = this.props
return (
<TimeSlot
key={slotNumber}
slotNumber={slotNumber}
slotPropGetter={slotPropGetter}
dayWrapperComponent={dayWrapperComponent}
showLabel={showLabels}
content={content}
culture={culture}
isNow={isNow}
resource={resource}
value={value}
showTransferEvent={showTransferEvent}
/>
)
}
renderSlices() {
const ret = []
const sliceLength = this.props.step
let sliceValue = this.props.value;
for (let i = 0; i < this.props.timeslots; i++) {
const content = localizer.format(
sliceValue,
'HH:mm',
this.props.culture
);
ret.push(this.renderSlice(i, content, sliceValue))
sliceValue = date.add(sliceValue, sliceLength, 'minutes')
}
return ret
}
renderEvent = () => {
let {
events,
showTransferEvent,
freeTrainers,
setChoosenTrainer,
showLabels,
handleDrop,
onCancelTraining,
trainerTraining,
mode,
onGotoPage,
isPushBtnTransfer,
} = this.props;
const valueTime = this.props.value.getTime()
for( let i = 0; i < events.length; i++){
if(events[i].start.getTime() === valueTime && showLabels) {
return (
<EventSlot
key={this.props.value.getTime()}
value={this.props.value.getTime()}
event={events[i]}
showTransferEvent={showTransferEvent}
setChoosenTrainer={this.props.setChoosenTrainer}
freeTrainers={freeTrainers}Б
idEvent={events[i].start.getTime()}
handleDrop={handleDrop}
setAbonement_Training = {this.props.setAbonement_Training}
onCancelTraining = {onCancelTraining}
mode = {mode}
onGotoPage = {onGotoPage}
isPushBtnTransfer = {isPushBtnTransfer}
deleteTraining = {this.props.deleteTraining}
deleteEventApiPatient={this.props.deleteEventApiPatient}
clickOnEvent={this.props.clickOnEvent}
selectIdEvent={this.props.selectIdEvent}
showTooLateTransferModal={this.showTooLateTransferModal}
showWasTransferModal={this.showWasTransferModal}
/>)
}
}
if(freeTrainers && freeTrainers.idEvent === this.props.value.getTime() && !showLabels){ // рендер выпадающего списка freeTrainer
return <EventSlot
key={this.props.value.getTime()}
value={this.props.value.getTime()}
showTransferEvent={showTransferEvent}
freeTrainers={freeTrainers}
setChoosenTrainer={this.props.setChoosenTrainer}
idEvent={freeTrainers.idEvent}
onGotoPage = {onGotoPage}
deleteEventApiPatient = {this.props.deleteEventApiPatient}
/>
}
return null;
}
renderMasterList = () => {
const {masterList, value, showMasterList} = this.props;
let freetrainers = [];
let busytrainers = [];
for(let elem in masterList){
if(elem === 'freetrainers') {
freetrainers = masterList[elem]
}
if(elem === 'busytrainers'){
busytrainers = masterList[elem]
}
}
if(freetrainers.length || busytrainers.length)
return (
<MasterListSlot
key={value.getTime()}
freetrainers = {freetrainers}
busytrainers = {busytrainers}
value = {value.getTime()}
showMasterList = {showMasterList}
/>
)
}
showModalTransferEvent = (idValue) => {
this.props.showModalTransferEvent(idValue);
}
render() {
//drag and drop
| const { connectDropTarget, hovered, item} = this.props;
const backgroundColor= hovered ? '#e8f8fc ' : 'white';
const {intervals, value, freeTrainers, isAdmin} = this.props;
let valuetM = value.getTime();
const flag = Array.isArray(intervals) ? intervals.some(el => {
if(Array.isArray(el.intervals)){
for(let i = 0; i < el.intervals.length; i++){
if(value.getTime() >= el.intervals[i].start*1000 && value.getTime() < el.intervals[i].end * 1000)
return true
}
}
else{
if((valuetM >= el.start*1000) && valuetM < (el.end * 1000)) return true
}
}) : null
const isViewTrainer = (freeTrainers && freeTrainers.idEvent === this.props.value.getTime()) ? true : false;//не OK если таместь freeTrainers
const currentEvent = this.renderEvent();
let cellClass = cn('rbc-timeslot-group', flag && !isViewTrainer && !currentEvent ? 'rbc-timeslot-group-OK' : 'rbc-timeslot-group-NOT');
const modalTransferEvent = flag && !isViewTrainer && !currentEvent ? this.showModalTransferEvent : () => {}; // перенос тренировки
if(isAdmin) {
return (
<div className={cellClass} style={{backgroundColor}} onClick={(e) => modalTransferEvent(value.getTime())}>
{this.renderSlices()}
{this.renderMasterList()}
</div>
)
}
if(flag && !isViewTrainer && !currentEvent){
return connectDropTarget(
<div className={cellClass} style={{backgroundColor}} onClick={(e) => modalTransferEvent(value.getTime())}>
{this.renderSlices()}
{currentEvent}
</div>
)
}
return (
<div className={cellClass} style={{backgroundColor}} onClick={(e) => modalTransferEvent(value.getTime())}>
{this.renderSlices()}
{currentEvent}
<Modal
title='Сообщение'
visible={this.state.modalTooLateTransfer}
onCancel={() => this.setState({modalTooLateTransfer : false})}
width={360}
className="schedule-message-modal-wrapper"
>
<div className="schedule-message-modal-text">
Переносить тренировку можно только за 24 часа до тренировки
</div>
<div className="schedule-message-modal">
<div className="schedule-message-btn">
<Button btnText='Ок'
onClick= {() => {
this.setState({modalTooLateTransfer: false});
}}
type='yellow'/>
</div>
</div>
</Modal>
<Modal
title='Сообщение'
visible={this.state.modalWasTransfer}
onCancel={() => this.setState({modalWasTransfer : false})}
width={360}
className="schedule-message-modal-wrapper"
>
<div className="schedule-message-modal-text">
Одну тренировку можно переносить лишь один раз
</div>
<div className="schedule-message-modal">
<div className="schedule-message-btn">
<Button btnText='Ок'
onClick= {() => {
this.setState({modalWasTransfer: false});
}}
type='yellow'/>
</div>
</div>
</Modal>
</div>
)
}
}
export default DropTarget('event-group', squareTarget, collect)(TimeSlotGroup); | identifier_body | |
TimeSlotGroup.js | import PropTypes from 'prop-types'
import React, { Component } from 'react'
import cn from 'classnames'
import TimeSlot from './TimeSlot'
import date from './utils/dates.js'
import localizer from './header/localizer'
import { elementType, dateFormat } from './utils/propTypes'
import EventSlot from './EventSlot';
import { DropTarget } from 'react-dnd';
import MasterListSlot from './MasterListSlot';
import moment from 'moment';
import Modal from '../../Modal/index';
import Button from "../../Button";
const squareTarget = {
| (props) {
props.transferTraining(props.value); // drag and drop
console.log('DROP props :', props);
//moveKnight(props.x, props.y);
},
// hover(props, monitor, component) {
// // This is fired very often and lets you perform side effects
// // in response to the hover. You can't handle enter and leave
// // here—if you need them, put monitor.isOver() into collect() so you
// // can just use componentDidUpdate() to handle enter/leave.
// }
};
function collect(connect, monitor){
return{
connectDropTarget: connect.dropTarget(),
hovered: monitor.isOver(),
item: monitor.getItem(),
}
}
class TimeSlotGroup extends Component {
static propTypes = {
dayWrapperComponent: elementType,
timeslots: PropTypes.number.isRequired,
step: PropTypes.number.isRequired,
value: PropTypes.instanceOf(Date).isRequired,
showLabels: PropTypes.bool,
isNow: PropTypes.bool,
slotPropGetter: PropTypes.func,
timeGutterFormat: dateFormat,
culture: PropTypes.string,
resource: PropTypes.string,
}
static defaultProps = {
intervals: [],
timeslots: 2,
step: 30,
isNow: false,
showLabels: false,
freeTrainers: null,
}
constructor(props) {
super(props);
this.state = {
modalWasTransfer: false,
modalTooLateTransfer: false,
}
};
showWasTransferModal = () => {
this.setState({modalWasTransfer: true});
}
showTooLateTransferModal = () => {
this.setState({modalTooLateTransfer: true});
}
renderSlice(slotNumber, content, value) {
const {
dayWrapperComponent,
showLabels,
isNow,
culture,
resource,
slotPropGetter,
showTransferEvent, //my
} = this.props
return (
<TimeSlot
key={slotNumber}
slotNumber={slotNumber}
slotPropGetter={slotPropGetter}
dayWrapperComponent={dayWrapperComponent}
showLabel={showLabels}
content={content}
culture={culture}
isNow={isNow}
resource={resource}
value={value}
showTransferEvent={showTransferEvent}
/>
)
}
renderSlices() {
const ret = []
const sliceLength = this.props.step
let sliceValue = this.props.value;
for (let i = 0; i < this.props.timeslots; i++) {
const content = localizer.format(
sliceValue,
'HH:mm',
this.props.culture
);
ret.push(this.renderSlice(i, content, sliceValue))
sliceValue = date.add(sliceValue, sliceLength, 'minutes')
}
return ret
}
renderEvent = () => {
let {
events,
showTransferEvent,
freeTrainers,
setChoosenTrainer,
showLabels,
handleDrop,
onCancelTraining,
trainerTraining,
mode,
onGotoPage,
isPushBtnTransfer,
} = this.props;
const valueTime = this.props.value.getTime()
for( let i = 0; i < events.length; i++){
if(events[i].start.getTime() === valueTime && showLabels) {
return (
<EventSlot
key={this.props.value.getTime()}
value={this.props.value.getTime()}
event={events[i]}
showTransferEvent={showTransferEvent}
setChoosenTrainer={this.props.setChoosenTrainer}
freeTrainers={freeTrainers}Б
idEvent={events[i].start.getTime()}
handleDrop={handleDrop}
setAbonement_Training = {this.props.setAbonement_Training}
onCancelTraining = {onCancelTraining}
mode = {mode}
onGotoPage = {onGotoPage}
isPushBtnTransfer = {isPushBtnTransfer}
deleteTraining = {this.props.deleteTraining}
deleteEventApiPatient={this.props.deleteEventApiPatient}
clickOnEvent={this.props.clickOnEvent}
selectIdEvent={this.props.selectIdEvent}
showTooLateTransferModal={this.showTooLateTransferModal}
showWasTransferModal={this.showWasTransferModal}
/>)
}
}
if(freeTrainers && freeTrainers.idEvent === this.props.value.getTime() && !showLabels){ // рендер выпадающего списка freeTrainer
return <EventSlot
key={this.props.value.getTime()}
value={this.props.value.getTime()}
showTransferEvent={showTransferEvent}
freeTrainers={freeTrainers}
setChoosenTrainer={this.props.setChoosenTrainer}
idEvent={freeTrainers.idEvent}
onGotoPage = {onGotoPage}
deleteEventApiPatient = {this.props.deleteEventApiPatient}
/>
}
return null;
}
renderMasterList = () => {
const {masterList, value, showMasterList} = this.props;
let freetrainers = [];
let busytrainers = [];
for(let elem in masterList){
if(elem === 'freetrainers') {
freetrainers = masterList[elem]
}
if(elem === 'busytrainers'){
busytrainers = masterList[elem]
}
}
if(freetrainers.length || busytrainers.length)
return (
<MasterListSlot
key={value.getTime()}
freetrainers = {freetrainers}
busytrainers = {busytrainers}
value = {value.getTime()}
showMasterList = {showMasterList}
/>
)
}
showModalTransferEvent = (idValue) => {
this.props.showModalTransferEvent(idValue);
}
render() {
//drag and drop
const { connectDropTarget, hovered, item} = this.props;
const backgroundColor= hovered ? '#e8f8fc ' : 'white';
const {intervals, value, freeTrainers, isAdmin} = this.props;
let valuetM = value.getTime();
const flag = Array.isArray(intervals) ? intervals.some(el => {
if(Array.isArray(el.intervals)){
for(let i = 0; i < el.intervals.length; i++){
if(value.getTime() >= el.intervals[i].start*1000 && value.getTime() < el.intervals[i].end * 1000)
return true
}
}
else{
if((valuetM >= el.start*1000) && valuetM < (el.end * 1000)) return true
}
}) : null
const isViewTrainer = (freeTrainers && freeTrainers.idEvent === this.props.value.getTime()) ? true : false;//не OK если таместь freeTrainers
const currentEvent = this.renderEvent();
let cellClass = cn('rbc-timeslot-group', flag && !isViewTrainer && !currentEvent ? 'rbc-timeslot-group-OK' : 'rbc-timeslot-group-NOT');
const modalTransferEvent = flag && !isViewTrainer && !currentEvent ? this.showModalTransferEvent : () => {}; // перенос тренировки
if(isAdmin) {
return (
<div className={cellClass} style={{backgroundColor}} onClick={(e) => modalTransferEvent(value.getTime())}>
{this.renderSlices()}
{this.renderMasterList()}
</div>
)
}
if(flag && !isViewTrainer && !currentEvent){
return connectDropTarget(
<div className={cellClass} style={{backgroundColor}} onClick={(e) => modalTransferEvent(value.getTime())}>
{this.renderSlices()}
{currentEvent}
</div>
)
}
return (
<div className={cellClass} style={{backgroundColor}} onClick={(e) => modalTransferEvent(value.getTime())}>
{this.renderSlices()}
{currentEvent}
<Modal
title='Сообщение'
visible={this.state.modalTooLateTransfer}
onCancel={() => this.setState({modalTooLateTransfer : false})}
width={360}
className="schedule-message-modal-wrapper"
>
<div className="schedule-message-modal-text">
Переносить тренировку можно только за 24 часа до тренировки
</div>
<div className="schedule-message-modal">
<div className="schedule-message-btn">
<Button btnText='Ок'
onClick= {() => {
this.setState({modalTooLateTransfer: false});
}}
type='yellow'/>
</div>
</div>
</Modal>
<Modal
title='Сообщение'
visible={this.state.modalWasTransfer}
onCancel={() => this.setState({modalWasTransfer : false})}
width={360}
className="schedule-message-modal-wrapper"
>
<div className="schedule-message-modal-text">
Одну тренировку можно переносить лишь один раз
</div>
<div className="schedule-message-modal">
<div className="schedule-message-btn">
<Button btnText='Ок'
onClick= {() => {
this.setState({modalWasTransfer: false});
}}
type='yellow'/>
</div>
</div>
</Modal>
</div>
)
}
}
export default DropTarget('event-group', squareTarget, collect)(TimeSlotGroup); | drop | identifier_name |
TimeSlotGroup.js | import PropTypes from 'prop-types'
import React, { Component } from 'react'
import cn from 'classnames'
import TimeSlot from './TimeSlot'
import date from './utils/dates.js'
import localizer from './header/localizer'
import { elementType, dateFormat } from './utils/propTypes'
import EventSlot from './EventSlot';
import { DropTarget } from 'react-dnd';
import MasterListSlot from './MasterListSlot';
import moment from 'moment';
import Modal from '../../Modal/index';
import Button from "../../Button";
const squareTarget = {
drop(props) {
props.transferTraining(props.value); // drag and drop
console.log('DROP props :', props);
//moveKnight(props.x, props.y);
},
// hover(props, monitor, component) {
// // This is fired very often and lets you perform side effects
// // in response to the hover. You can't handle enter and leave
// // here—if you need them, put monitor.isOver() into collect() so you
// // can just use componentDidUpdate() to handle enter/leave.
// }
};
function collect(connect, monitor){
return{
connectDropTarget: connect.dropTarget(),
hovered: monitor.isOver(),
item: monitor.getItem(),
}
}
class TimeSlotGroup extends Component {
static propTypes = {
dayWrapperComponent: elementType,
timeslots: PropTypes.number.isRequired,
step: PropTypes.number.isRequired,
value: PropTypes.instanceOf(Date).isRequired,
showLabels: PropTypes.bool,
isNow: PropTypes.bool,
slotPropGetter: PropTypes.func,
timeGutterFormat: dateFormat,
culture: PropTypes.string,
resource: PropTypes.string,
}
static defaultProps = {
intervals: [],
timeslots: 2,
step: 30,
isNow: false,
showLabels: false,
freeTrainers: null,
}
constructor(props) {
super(props);
this.state = {
modalWasTransfer: false,
modalTooLateTransfer: false,
}
};
showWasTransferModal = () => {
this.setState({modalWasTransfer: true});
}
showTooLateTransferModal = () => {
this.setState({modalTooLateTransfer: true});
}
renderSlice(slotNumber, content, value) {
const {
dayWrapperComponent,
showLabels,
isNow,
culture,
resource,
slotPropGetter,
showTransferEvent, //my
} = this.props
return (
<TimeSlot
key={slotNumber}
slotNumber={slotNumber}
slotPropGetter={slotPropGetter}
dayWrapperComponent={dayWrapperComponent}
showLabel={showLabels}
content={content}
culture={culture}
isNow={isNow}
resource={resource}
value={value}
showTransferEvent={showTransferEvent}
/>
)
}
renderSlices() {
const ret = []
const sliceLength = this.props.step
let sliceValue = this.props.value;
for (let i = 0; i < this.props.timeslots; i++) {
const content = localizer.format(
sliceValue,
'HH:mm',
this.props.culture
);
ret.push(this.renderSlice(i, content, sliceValue))
sliceValue = date.add(sliceValue, sliceLength, 'minutes')
}
return ret
}
renderEvent = () => {
let {
events,
showTransferEvent,
freeTrainers,
setChoosenTrainer,
showLabels,
handleDrop,
onCancelTraining,
trainerTraining,
mode,
onGotoPage,
isPushBtnTransfer,
} = this.props;
const valueTime = this.props.value.getTime()
for( let i = 0; i < events.length; i++){
if(events[i].start.getTime() === valueTime && showLabels) {
return (
<EventSlot
key={this.props.value.getTime()}
value={this.props.value.getTime()}
event={events[i]}
showTransferEvent={showTransferEvent}
setChoosenTrainer={this.props.setChoosenTrainer}
freeTrainers={freeTrainers}Б
idEvent={events[i].start.getTime()}
handleDrop={handleDrop}
setAbonement_Training = {this.props.setAbonement_Training}
onCancelTraining = {onCancelTraining}
mode = {mode}
onGotoPage = {onGotoPage}
isPushBtnTransfer = {isPushBtnTransfer}
deleteTraining = {this.props.deleteTraining}
deleteEventApiPatient={this.props.deleteEventApiPatient}
clickOnEvent={this.props.clickOnEvent}
selectIdEvent={this.props.selectIdEvent}
showTooLateTransferModal={this.showTooLateTransferModal}
showWasTransferModal={this.showWasTransferModal}
/>)
}
}
if(freeTrainers && freeTrainers.idEvent === this.props.value.getTime() && !showLabels){ // рендер выпадающего списка freeTrainer
return <EventSlot
key={this.props.value.getTime()}
value={this.props.value.getTime()}
showTransferEvent={showTransferEvent}
freeTrainers={freeTrainers}
setChoosenTrainer={this.props.setChoosenTrainer}
idEvent={freeTrainers.idEvent}
onGotoPage = {onGotoPage}
deleteEventApiPatient = {this.props.deleteEventApiPatient}
/>
}
return null;
}
renderMasterList = () => {
const {masterList, value, showMasterList} = this.props;
let freetrainers = [];
let busytrainers = [];
for(let elem in masterList){
if(elem === 'freetrainers') {
freetrainers = masterList[elem]
}
if(elem === 'busytrainers'){
busytrainers = masterList[elem]
}
}
if(freetrainers.length || busytrainers.length)
return (
<MasterListSlot
key={value.getTime()}
freetrainers = {freetrainers}
busytrainers = {busytrainers}
value = {value.getTime()}
showMasterList = {showMasterList}
/>
)
}
showModalTransferEvent = (idValue) => {
this.props.showModalTransferEvent(idValue);
}
render() {
//drag and drop
const { connectDropTarget, hovered, item} = this.props;
const backgroundColor= hovered ? '#e8f8fc ' : 'white';
const {intervals, value, freeTrainers, isAdmin} = this.props;
let valuetM = value.getTime();
const flag = Array.isArray(intervals) ? intervals.some(el => {
if(Array.isArray(el.intervals)){
for(let i = 0; i < el.intervals.length; i++){
if(value.getTime() >= el.intervals[i].start*1000 && value.getTime() < el.intervals[i].end * 1000)
return true
}
}
else{
if((valuetM >= el.start*1000) && valuetM < (el.end * 1000)) return true
}
}) : null
const isViewTrainer = (freeTrainers && freeTrainers.idEvent === this.props.value.getTime()) ? true : false;//не OK если таместь freeTrainers
const currentEvent = this.renderEvent();
let cellClass = cn('rbc-timeslot-group', flag && !isViewTrainer && !currentEvent ? 'rbc-timeslot-group-OK' : 'rbc-timeslot-group-NOT');
const modalTransferEvent = flag && !isViewTrainer && !currentEvent ? this.showModalTransferEvent : () => {}; // перенос тренировки
if(isAdmin) {
return (
<div className={cellClass} style={{backgroundColor}} onClick={(e) => modalTransferEvent(value.getTime())}>
{this.renderSlices()}
{this.renderMasterList()}
</div>
)
} | return connectDropTarget(
<div className={cellClass} style={{backgroundColor}} onClick={(e) => modalTransferEvent(value.getTime())}>
{this.renderSlices()}
{currentEvent}
</div>
)
}
return (
<div className={cellClass} style={{backgroundColor}} onClick={(e) => modalTransferEvent(value.getTime())}>
{this.renderSlices()}
{currentEvent}
<Modal
title='Сообщение'
visible={this.state.modalTooLateTransfer}
onCancel={() => this.setState({modalTooLateTransfer : false})}
width={360}
className="schedule-message-modal-wrapper"
>
<div className="schedule-message-modal-text">
Переносить тренировку можно только за 24 часа до тренировки
</div>
<div className="schedule-message-modal">
<div className="schedule-message-btn">
<Button btnText='Ок'
onClick= {() => {
this.setState({modalTooLateTransfer: false});
}}
type='yellow'/>
</div>
</div>
</Modal>
<Modal
title='Сообщение'
visible={this.state.modalWasTransfer}
onCancel={() => this.setState({modalWasTransfer : false})}
width={360}
className="schedule-message-modal-wrapper"
>
<div className="schedule-message-modal-text">
Одну тренировку можно переносить лишь один раз
</div>
<div className="schedule-message-modal">
<div className="schedule-message-btn">
<Button btnText='Ок'
onClick= {() => {
this.setState({modalWasTransfer: false});
}}
type='yellow'/>
</div>
</div>
</Modal>
</div>
)
}
}
export default DropTarget('event-group', squareTarget, collect)(TimeSlotGroup); |
if(flag && !isViewTrainer && !currentEvent){ | random_line_split |
semaphore.rs | use std::{fmt, mem};
use std::panic::{RefUnwindSafe, UnwindSafe};
use std::sync::atomic::Ordering::{Relaxed, Acquire};
use crate::state::ReleaseState::Unlocked;
use crate::state::AcquireState::{Available, Queued};
use std::fmt::{Debug, Formatter};
use crate::state::{AcquireStep, Waiter, Permits, AcquireState, ReleaseState};
use std::cell::UnsafeCell;
use crate::{AcquireFuture, TryAcquireError, SemaphoreGuard, AcquireFutureArc, SemaphoreGuardArc};
use std::marker::{PhantomPinned, PhantomData};
use crate::waker::AtomicWaker;
use std::ptr::null;
use std::sync::Arc;
use crate::atomic::Atomic;
use std::mem::size_of;
use crate::release::ReleaseAction;
#[allow(unused_imports)] // used by docs
use crate::errors::PoisonError;
/// An async weighted semaphore. See [crate documentation](index.html) for usage.
// This implementation encodes state (the available counter, acquire queue, and cancel queue) into
// multiple atomic variables and linked lists. Concurrent acquires (and concurrent cancels) synchronize
// by pushing onto a stack with an atomic swap. Releases synchronize with other operations by attempting
// to acquire a lock. If the lock is successfully acquired, the release can proceed. Otherwise
// the lock is marked dirty to indicate that there is additional work for the lock owner to do.
pub struct Semaphore {
// The number of available permits or the back of the queue (without next edges).
pub(crate) acquire: Atomic<AcquireState>,
// A number of releasable permits, and the state of the current release lock.
pub(crate) release: Atomic<ReleaseState>,
// The front of the queue (with next edges).
pub(crate) front: UnsafeCell<*const Waiter>,
// The last node swapped from AcquireState (with next edges).
pub(crate) middle: UnsafeCell<*const Waiter>,
// A stack of nodes that are cancelling.
pub(crate) next_cancel: Atomic<*const Waiter>,
}
unsafe impl Sync for Semaphore {}
unsafe impl Send for Semaphore {}
impl UnwindSafe for Semaphore {}
impl RefUnwindSafe for Semaphore {}
impl Debug for Semaphore {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self.acquire.load(Relaxed) {
Available(available) => write!(f, "Semaphore::Ready({:?})", available)?,
Queued(_) => match self.release.load(Relaxed) {
Unlocked(available) => write!(f, "Semaphore::Blocked({:?})", available)?,
_ => write!(f, "Semaphore::Unknown")?,
},
};
Ok(())
}
}
impl Semaphore {
/// The maximum number of permits that can be made available. This is slightly smaller than
/// [`usize::MAX`]. If the number of available permits exceeds this number, it may poison the
/// semaphore.
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, SemaphoreGuard};
/// struct ReadWriteLock(Semaphore);
/// impl ReadWriteLock {
/// fn new() -> Self {
/// ReadWriteLock(Semaphore::new(Semaphore::MAX_AVAILABLE))
/// }
/// // Acquire one permit, allowing up to MAX_AVAILABLE concurrent readers.
/// async fn read(&self) -> SemaphoreGuard<'_> {
/// self.0.acquire(1).await.unwrap()
/// }
/// // The writer acquires all the permits, prevent any concurrent writers or readers. The
/// // first-in-first-out priority policy prevents writer starvation.
/// async fn write(&self) -> SemaphoreGuard<'_> {
/// self.0.acquire(Semaphore::MAX_AVAILABLE).await.unwrap()
/// }
/// }
/// ```
pub const MAX_AVAILABLE: usize = (1 << (size_of::<usize>() * 8 - 3)) - 1;
/// Create a new semaphore with an initial number of permits.
/// # Examples
/// ```
/// use async_weighted_semaphore::Semaphore;
/// let semaphore = Semaphore::new(1024);
/// ```
pub fn new(initial: usize) -> Self {
Semaphore {
acquire: Atomic::new(Available(Permits::new(initial))),
release: Atomic::new(Unlocked(Permits::new(0))),
front: UnsafeCell::new(null()),
middle: UnsafeCell::new(null()),
next_cancel: Atomic::new(null()),
}
}
/// Wait until there are no older pending calls to [acquire](#method.acquire) and at least `amount` permits available.
/// Then consume the requested permits and return a [`SemaphoreGuard`].
/// # Errors
/// Returns [`PoisonError`] is the semaphore is poisoned.
/// # Examples
/// ```
/// # use futures::executor::block_on;
/// # use std::future::Future;
/// use async_weighted_semaphore::Semaphore;
/// async fn limit_concurrency(semaphore: &Semaphore, future: impl Future<Output=()>) {
/// let guard = semaphore.acquire(1).await.unwrap();
/// future.await
/// }
/// ```
pub fn acquire(&self, amount: usize) -> AcquireFuture {
AcquireFuture(UnsafeCell::new(Waiter {
semaphore: self,
step: UnsafeCell::new(AcquireStep::Entering),
waker: unsafe { AtomicWaker::new() },
amount,
next: UnsafeCell::new(null()),
prev: UnsafeCell::new(null()),
next_cancel: UnsafeCell::new(null()),
}), PhantomData, PhantomPinned)
}
/// Like [acquire](#method.acquire), but fails if the call would block.
/// # Errors
/// * Returns [`TryAcquireError::Poisoned`] is the semaphore is poisoned.
/// * Returns [`TryAcquireError::WouldBlock`] if a call to `acquire` would have blocked. This can
/// occur if there are insufficient available permits or if there is another pending call to acquire.
/// # Examples
/// ```
/// # use futures::executor::block_on;
/// # use std::future::Future;
/// use async_weighted_semaphore::Semaphore;
/// async fn run_if_safe(semaphore: &Semaphore, future: impl Future<Output=()>) {
/// if semaphore.try_acquire(1).is_ok() {
/// future.await
/// }
/// }
/// ```
pub fn try_acquire(&self, amount: usize) -> Result<SemaphoreGuard, TryAcquireError> {
let mut current = self.acquire.load(Acquire);
loop {
match current {
Queued(_) => return Err(TryAcquireError::WouldBlock),
Available(available) => {
let available = available.into_usize().ok_or(TryAcquireError::Poisoned)?;
if available < amount {
return Err(TryAcquireError::WouldBlock);
}
if self.acquire.cmpxchg_weak_acqrel(&mut current, Available(Permits::new(available - amount))) |
}
}
}
}
/// Like [acquire](#method.acquire), but takes an [`Arc`] `<Semaphore>` and returns a guard that is `'static`, [`Send`] and [`Sync`].
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, PoisonError, SemaphoreGuardArc};
/// # use std::sync::Arc;
/// use async_channel::{Sender, SendError};
/// // Limit size of a producer-consumer queue
/// async fn send<T>(semaphore: &Arc<Semaphore>,
/// sender: &Sender<(SemaphoreGuardArc, T)>,
/// message: T
/// ) -> Result<(), SendError<T>>{
/// match semaphore.acquire_arc(1).await {
/// // A semaphore can be poisoned to prevent deadlock when a channel closes.
/// Err(PoisonError) => Err(SendError(message)),
/// Ok(guard) => match sender.send((guard, message)).await{
/// Err(SendError((guard, message))) => Err(SendError(message)),
/// Ok(()) => Ok(())
/// }
/// }
/// }
/// ```
pub fn acquire_arc(self: &Arc<Self>, amount: usize) -> AcquireFutureArc {
AcquireFutureArc {
arc: self.clone(),
inner: unsafe { mem::transmute::<AcquireFuture, AcquireFuture>(self.acquire(amount)) },
}
}
/// Like [try_acquire](#method.try_acquire), but takes an [`Arc`] `<Semaphore>`, and returns a guard that is `'static`,
/// [`Send`] and [`Sync`].
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, TryAcquireError, SemaphoreGuardArc};
/// # use std::sync::Arc;
/// use async_channel::{Sender, TrySendError};
/// // Limit size of a producer-consumer queue
/// async fn try_send<T>(semaphore: &Arc<Semaphore>,
/// sender: &Sender<(SemaphoreGuardArc, T)>,
/// message: T
/// ) -> Result<(), TrySendError<T>>{
/// match semaphore.try_acquire_arc(1) {
/// Err(TryAcquireError::WouldBlock) => Err(TrySendError::Full(message)),
/// // A semaphore can be poisoned to prevent deadlock when a channel closes.
/// Err(TryAcquireError::Poisoned) => Err(TrySendError::Closed(message)),
/// Ok(guard) => match sender.try_send((guard, message)) {
/// Err(TrySendError::Closed((guard, message))) => Err(TrySendError::Closed(message)),
/// Err(TrySendError::Full((guard, message))) => Err(TrySendError::Full(message)),
/// Ok(()) => Ok(())
/// }
/// }
/// }
/// ```
pub fn try_acquire_arc(self: &Arc<Self>, amount: usize) -> Result<SemaphoreGuardArc, TryAcquireError> {
let guard = self.try_acquire(amount)?;
let result = SemaphoreGuardArc::new(self.clone(), amount);
guard.forget();
Ok(result)
}
/// Return `amount` permits to the semaphore. This will eventually wake any calls to [acquire](#method.acquire)
/// that can succeed with the additional permits. Calling `release` often makes sense after calling
/// [`SemaphoreGuard::forget`] or when using the semaphore to signal the number of elements that
/// are available for processing.
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, TryAcquireError};
/// use async_channel::{Receiver, RecvError};
/// // Limit size of a producer-consumer queue
/// async fn recv<T>(semaphore: &Semaphore, recv: &Receiver<T>) -> Result<T, RecvError>{
/// let result = recv.recv().await?;
/// // Note that this only guards elements in the queue, not those being processed after the
/// // queue.
/// semaphore.release(1);
/// Ok(result)
/// }
/// ```
pub fn release(&self, amount: usize) {
unsafe {
ReleaseAction { sem: self, releasable: Permits::new(amount) }.release();
}
}
/// Poison the semaphore, causing all pending and future calls to `acquire` to fail immediately.
/// This can be used to unblock pending acquires when the guarded operation would fail anyway.
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, TryAcquireError};
/// # use std::sync::Arc;
/// # use async_std::sync::Mutex;
/// use async_channel::{Receiver, RecvError};
/// async fn consume(semaphore: &Semaphore, receiver: Receiver<usize>){
/// while let Ok(x) = receiver.recv().await {
/// println!("{:?}", x);
/// semaphore.release(1);
/// }
/// // There will be no more calls to recv, so unblock all senders.
/// semaphore.poison();
/// }
/// ```
pub fn poison(&self) {
unsafe {
ReleaseAction { sem: self, releasable: Permits::poison() }.release();
}
}
}
| {
return Ok(SemaphoreGuard::new(self, amount));
} | conditional_block |
semaphore.rs | use std::{fmt, mem};
use std::panic::{RefUnwindSafe, UnwindSafe};
use std::sync::atomic::Ordering::{Relaxed, Acquire};
use crate::state::ReleaseState::Unlocked;
use crate::state::AcquireState::{Available, Queued};
use std::fmt::{Debug, Formatter};
use crate::state::{AcquireStep, Waiter, Permits, AcquireState, ReleaseState};
use std::cell::UnsafeCell;
use crate::{AcquireFuture, TryAcquireError, SemaphoreGuard, AcquireFutureArc, SemaphoreGuardArc};
use std::marker::{PhantomPinned, PhantomData};
use crate::waker::AtomicWaker;
use std::ptr::null;
use std::sync::Arc;
use crate::atomic::Atomic;
use std::mem::size_of;
use crate::release::ReleaseAction;
#[allow(unused_imports)] // used by docs
use crate::errors::PoisonError;
/// An async weighted semaphore. See [crate documentation](index.html) for usage.
// This implementation encodes state (the available counter, acquire queue, and cancel queue) into
// multiple atomic variables and linked lists. Concurrent acquires (and concurrent cancels) synchronize
// by pushing onto a stack with an atomic swap. Releases synchronize with other operations by attempting
// to acquire a lock. If the lock is successfully acquired, the release can proceed. Otherwise
// the lock is marked dirty to indicate that there is additional work for the lock owner to do.
pub struct Semaphore {
// The number of available permits or the back of the queue (without next edges).
pub(crate) acquire: Atomic<AcquireState>,
// A number of releasable permits, and the state of the current release lock.
pub(crate) release: Atomic<ReleaseState>,
// The front of the queue (with next edges).
pub(crate) front: UnsafeCell<*const Waiter>,
// The last node swapped from AcquireState (with next edges).
pub(crate) middle: UnsafeCell<*const Waiter>,
// A stack of nodes that are cancelling.
pub(crate) next_cancel: Atomic<*const Waiter>,
}
unsafe impl Sync for Semaphore {}
unsafe impl Send for Semaphore {}
impl UnwindSafe for Semaphore {}
impl RefUnwindSafe for Semaphore {}
impl Debug for Semaphore {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self.acquire.load(Relaxed) {
Available(available) => write!(f, "Semaphore::Ready({:?})", available)?,
Queued(_) => match self.release.load(Relaxed) {
Unlocked(available) => write!(f, "Semaphore::Blocked({:?})", available)?,
_ => write!(f, "Semaphore::Unknown")?,
},
};
Ok(())
}
}
impl Semaphore {
/// The maximum number of permits that can be made available. This is slightly smaller than
/// [`usize::MAX`]. If the number of available permits exceeds this number, it may poison the
/// semaphore.
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, SemaphoreGuard};
/// struct ReadWriteLock(Semaphore);
/// impl ReadWriteLock {
/// fn new() -> Self {
/// ReadWriteLock(Semaphore::new(Semaphore::MAX_AVAILABLE))
/// }
/// // Acquire one permit, allowing up to MAX_AVAILABLE concurrent readers.
/// async fn read(&self) -> SemaphoreGuard<'_> {
/// self.0.acquire(1).await.unwrap()
/// }
/// // The writer acquires all the permits, prevent any concurrent writers or readers. The
/// // first-in-first-out priority policy prevents writer starvation.
/// async fn write(&self) -> SemaphoreGuard<'_> {
/// self.0.acquire(Semaphore::MAX_AVAILABLE).await.unwrap()
/// }
/// }
/// ```
pub const MAX_AVAILABLE: usize = (1 << (size_of::<usize>() * 8 - 3)) - 1;
/// Create a new semaphore with an initial number of permits.
/// # Examples
/// ```
/// use async_weighted_semaphore::Semaphore;
/// let semaphore = Semaphore::new(1024);
/// ```
pub fn new(initial: usize) -> Self {
Semaphore {
acquire: Atomic::new(Available(Permits::new(initial))),
release: Atomic::new(Unlocked(Permits::new(0))),
front: UnsafeCell::new(null()),
middle: UnsafeCell::new(null()),
next_cancel: Atomic::new(null()),
}
}
/// Wait until there are no older pending calls to [acquire](#method.acquire) and at least `amount` permits available.
/// Then consume the requested permits and return a [`SemaphoreGuard`].
/// # Errors
/// Returns [`PoisonError`] is the semaphore is poisoned.
/// # Examples
/// ```
/// # use futures::executor::block_on;
/// # use std::future::Future;
/// use async_weighted_semaphore::Semaphore;
/// async fn limit_concurrency(semaphore: &Semaphore, future: impl Future<Output=()>) {
/// let guard = semaphore.acquire(1).await.unwrap();
/// future.await
/// }
/// ```
pub fn acquire(&self, amount: usize) -> AcquireFuture {
AcquireFuture(UnsafeCell::new(Waiter {
semaphore: self,
step: UnsafeCell::new(AcquireStep::Entering),
waker: unsafe { AtomicWaker::new() },
amount,
next: UnsafeCell::new(null()),
prev: UnsafeCell::new(null()),
next_cancel: UnsafeCell::new(null()),
}), PhantomData, PhantomPinned)
}
/// Like [acquire](#method.acquire), but fails if the call would block.
/// # Errors
/// * Returns [`TryAcquireError::Poisoned`] is the semaphore is poisoned.
/// * Returns [`TryAcquireError::WouldBlock`] if a call to `acquire` would have blocked. This can
/// occur if there are insufficient available permits or if there is another pending call to acquire.
/// # Examples
/// ```
/// # use futures::executor::block_on;
/// # use std::future::Future;
/// use async_weighted_semaphore::Semaphore;
/// async fn run_if_safe(semaphore: &Semaphore, future: impl Future<Output=()>) {
/// if semaphore.try_acquire(1).is_ok() {
/// future.await
/// }
/// }
/// ```
pub fn | (&self, amount: usize) -> Result<SemaphoreGuard, TryAcquireError> {
let mut current = self.acquire.load(Acquire);
loop {
match current {
Queued(_) => return Err(TryAcquireError::WouldBlock),
Available(available) => {
let available = available.into_usize().ok_or(TryAcquireError::Poisoned)?;
if available < amount {
return Err(TryAcquireError::WouldBlock);
}
if self.acquire.cmpxchg_weak_acqrel(&mut current, Available(Permits::new(available - amount))) {
return Ok(SemaphoreGuard::new(self, amount));
}
}
}
}
}
/// Like [acquire](#method.acquire), but takes an [`Arc`] `<Semaphore>` and returns a guard that is `'static`, [`Send`] and [`Sync`].
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, PoisonError, SemaphoreGuardArc};
/// # use std::sync::Arc;
/// use async_channel::{Sender, SendError};
/// // Limit size of a producer-consumer queue
/// async fn send<T>(semaphore: &Arc<Semaphore>,
/// sender: &Sender<(SemaphoreGuardArc, T)>,
/// message: T
/// ) -> Result<(), SendError<T>>{
/// match semaphore.acquire_arc(1).await {
/// // A semaphore can be poisoned to prevent deadlock when a channel closes.
/// Err(PoisonError) => Err(SendError(message)),
/// Ok(guard) => match sender.send((guard, message)).await{
/// Err(SendError((guard, message))) => Err(SendError(message)),
/// Ok(()) => Ok(())
/// }
/// }
/// }
/// ```
pub fn acquire_arc(self: &Arc<Self>, amount: usize) -> AcquireFutureArc {
AcquireFutureArc {
arc: self.clone(),
inner: unsafe { mem::transmute::<AcquireFuture, AcquireFuture>(self.acquire(amount)) },
}
}
/// Like [try_acquire](#method.try_acquire), but takes an [`Arc`] `<Semaphore>`, and returns a guard that is `'static`,
/// [`Send`] and [`Sync`].
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, TryAcquireError, SemaphoreGuardArc};
/// # use std::sync::Arc;
/// use async_channel::{Sender, TrySendError};
/// // Limit size of a producer-consumer queue
/// async fn try_send<T>(semaphore: &Arc<Semaphore>,
/// sender: &Sender<(SemaphoreGuardArc, T)>,
/// message: T
/// ) -> Result<(), TrySendError<T>>{
/// match semaphore.try_acquire_arc(1) {
/// Err(TryAcquireError::WouldBlock) => Err(TrySendError::Full(message)),
/// // A semaphore can be poisoned to prevent deadlock when a channel closes.
/// Err(TryAcquireError::Poisoned) => Err(TrySendError::Closed(message)),
/// Ok(guard) => match sender.try_send((guard, message)) {
/// Err(TrySendError::Closed((guard, message))) => Err(TrySendError::Closed(message)),
/// Err(TrySendError::Full((guard, message))) => Err(TrySendError::Full(message)),
/// Ok(()) => Ok(())
/// }
/// }
/// }
/// ```
pub fn try_acquire_arc(self: &Arc<Self>, amount: usize) -> Result<SemaphoreGuardArc, TryAcquireError> {
let guard = self.try_acquire(amount)?;
let result = SemaphoreGuardArc::new(self.clone(), amount);
guard.forget();
Ok(result)
}
/// Return `amount` permits to the semaphore. This will eventually wake any calls to [acquire](#method.acquire)
/// that can succeed with the additional permits. Calling `release` often makes sense after calling
/// [`SemaphoreGuard::forget`] or when using the semaphore to signal the number of elements that
/// are available for processing.
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, TryAcquireError};
/// use async_channel::{Receiver, RecvError};
/// // Limit size of a producer-consumer queue
/// async fn recv<T>(semaphore: &Semaphore, recv: &Receiver<T>) -> Result<T, RecvError>{
/// let result = recv.recv().await?;
/// // Note that this only guards elements in the queue, not those being processed after the
/// // queue.
/// semaphore.release(1);
/// Ok(result)
/// }
/// ```
pub fn release(&self, amount: usize) {
unsafe {
ReleaseAction { sem: self, releasable: Permits::new(amount) }.release();
}
}
/// Poison the semaphore, causing all pending and future calls to `acquire` to fail immediately.
/// This can be used to unblock pending acquires when the guarded operation would fail anyway.
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, TryAcquireError};
/// # use std::sync::Arc;
/// # use async_std::sync::Mutex;
/// use async_channel::{Receiver, RecvError};
/// async fn consume(semaphore: &Semaphore, receiver: Receiver<usize>){
/// while let Ok(x) = receiver.recv().await {
/// println!("{:?}", x);
/// semaphore.release(1);
/// }
/// // There will be no more calls to recv, so unblock all senders.
/// semaphore.poison();
/// }
/// ```
pub fn poison(&self) {
unsafe {
ReleaseAction { sem: self, releasable: Permits::poison() }.release();
}
}
}
| try_acquire | identifier_name |
semaphore.rs | use std::{fmt, mem};
use std::panic::{RefUnwindSafe, UnwindSafe};
use std::sync::atomic::Ordering::{Relaxed, Acquire};
use crate::state::ReleaseState::Unlocked;
use crate::state::AcquireState::{Available, Queued};
use std::fmt::{Debug, Formatter};
use crate::state::{AcquireStep, Waiter, Permits, AcquireState, ReleaseState};
use std::cell::UnsafeCell;
use crate::{AcquireFuture, TryAcquireError, SemaphoreGuard, AcquireFutureArc, SemaphoreGuardArc};
use std::marker::{PhantomPinned, PhantomData};
use crate::waker::AtomicWaker;
use std::ptr::null;
use std::sync::Arc;
use crate::atomic::Atomic;
use std::mem::size_of;
use crate::release::ReleaseAction;
#[allow(unused_imports)] // used by docs
use crate::errors::PoisonError;
/// An async weighted semaphore. See [crate documentation](index.html) for usage.
// This implementation encodes state (the available counter, acquire queue, and cancel queue) into
// multiple atomic variables and linked lists. Concurrent acquires (and concurrent cancels) synchronize
// by pushing onto a stack with an atomic swap. Releases synchronize with other operations by attempting
// to acquire a lock. If the lock is successfully acquired, the release can proceed. Otherwise
// the lock is marked dirty to indicate that there is additional work for the lock owner to do.
pub struct Semaphore {
// The number of available permits or the back of the queue (without next edges).
pub(crate) acquire: Atomic<AcquireState>,
// A number of releasable permits, and the state of the current release lock.
pub(crate) release: Atomic<ReleaseState>,
// The front of the queue (with next edges).
pub(crate) front: UnsafeCell<*const Waiter>,
// The last node swapped from AcquireState (with next edges).
pub(crate) middle: UnsafeCell<*const Waiter>,
// A stack of nodes that are cancelling.
pub(crate) next_cancel: Atomic<*const Waiter>,
}
unsafe impl Sync for Semaphore {}
unsafe impl Send for Semaphore {}
impl UnwindSafe for Semaphore {}
impl RefUnwindSafe for Semaphore {}
impl Debug for Semaphore {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result |
}
impl Semaphore {
/// The maximum number of permits that can be made available. This is slightly smaller than
/// [`usize::MAX`]. If the number of available permits exceeds this number, it may poison the
/// semaphore.
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, SemaphoreGuard};
/// struct ReadWriteLock(Semaphore);
/// impl ReadWriteLock {
/// fn new() -> Self {
/// ReadWriteLock(Semaphore::new(Semaphore::MAX_AVAILABLE))
/// }
/// // Acquire one permit, allowing up to MAX_AVAILABLE concurrent readers.
/// async fn read(&self) -> SemaphoreGuard<'_> {
/// self.0.acquire(1).await.unwrap()
/// }
/// // The writer acquires all the permits, prevent any concurrent writers or readers. The
/// // first-in-first-out priority policy prevents writer starvation.
/// async fn write(&self) -> SemaphoreGuard<'_> {
/// self.0.acquire(Semaphore::MAX_AVAILABLE).await.unwrap()
/// }
/// }
/// ```
pub const MAX_AVAILABLE: usize = (1 << (size_of::<usize>() * 8 - 3)) - 1;
/// Create a new semaphore with an initial number of permits.
/// # Examples
/// ```
/// use async_weighted_semaphore::Semaphore;
/// let semaphore = Semaphore::new(1024);
/// ```
pub fn new(initial: usize) -> Self {
Semaphore {
acquire: Atomic::new(Available(Permits::new(initial))),
release: Atomic::new(Unlocked(Permits::new(0))),
front: UnsafeCell::new(null()),
middle: UnsafeCell::new(null()),
next_cancel: Atomic::new(null()),
}
}
/// Wait until there are no older pending calls to [acquire](#method.acquire) and at least `amount` permits available.
/// Then consume the requested permits and return a [`SemaphoreGuard`].
/// # Errors
/// Returns [`PoisonError`] is the semaphore is poisoned.
/// # Examples
/// ```
/// # use futures::executor::block_on;
/// # use std::future::Future;
/// use async_weighted_semaphore::Semaphore;
/// async fn limit_concurrency(semaphore: &Semaphore, future: impl Future<Output=()>) {
/// let guard = semaphore.acquire(1).await.unwrap();
/// future.await
/// }
/// ```
pub fn acquire(&self, amount: usize) -> AcquireFuture {
AcquireFuture(UnsafeCell::new(Waiter {
semaphore: self,
step: UnsafeCell::new(AcquireStep::Entering),
waker: unsafe { AtomicWaker::new() },
amount,
next: UnsafeCell::new(null()),
prev: UnsafeCell::new(null()),
next_cancel: UnsafeCell::new(null()),
}), PhantomData, PhantomPinned)
}
/// Like [acquire](#method.acquire), but fails if the call would block.
/// # Errors
/// * Returns [`TryAcquireError::Poisoned`] is the semaphore is poisoned.
/// * Returns [`TryAcquireError::WouldBlock`] if a call to `acquire` would have blocked. This can
/// occur if there are insufficient available permits or if there is another pending call to acquire.
/// # Examples
/// ```
/// # use futures::executor::block_on;
/// # use std::future::Future;
/// use async_weighted_semaphore::Semaphore;
/// async fn run_if_safe(semaphore: &Semaphore, future: impl Future<Output=()>) {
/// if semaphore.try_acquire(1).is_ok() {
/// future.await
/// }
/// }
/// ```
pub fn try_acquire(&self, amount: usize) -> Result<SemaphoreGuard, TryAcquireError> {
let mut current = self.acquire.load(Acquire);
loop {
match current {
Queued(_) => return Err(TryAcquireError::WouldBlock),
Available(available) => {
let available = available.into_usize().ok_or(TryAcquireError::Poisoned)?;
if available < amount {
return Err(TryAcquireError::WouldBlock);
}
if self.acquire.cmpxchg_weak_acqrel(&mut current, Available(Permits::new(available - amount))) {
return Ok(SemaphoreGuard::new(self, amount));
}
}
}
}
}
/// Like [acquire](#method.acquire), but takes an [`Arc`] `<Semaphore>` and returns a guard that is `'static`, [`Send`] and [`Sync`].
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, PoisonError, SemaphoreGuardArc};
/// # use std::sync::Arc;
/// use async_channel::{Sender, SendError};
/// // Limit size of a producer-consumer queue
/// async fn send<T>(semaphore: &Arc<Semaphore>,
/// sender: &Sender<(SemaphoreGuardArc, T)>,
/// message: T
/// ) -> Result<(), SendError<T>>{
/// match semaphore.acquire_arc(1).await {
/// // A semaphore can be poisoned to prevent deadlock when a channel closes.
/// Err(PoisonError) => Err(SendError(message)),
/// Ok(guard) => match sender.send((guard, message)).await{
/// Err(SendError((guard, message))) => Err(SendError(message)),
/// Ok(()) => Ok(())
/// }
/// }
/// }
/// ```
pub fn acquire_arc(self: &Arc<Self>, amount: usize) -> AcquireFutureArc {
AcquireFutureArc {
arc: self.clone(),
inner: unsafe { mem::transmute::<AcquireFuture, AcquireFuture>(self.acquire(amount)) },
}
}
/// Like [try_acquire](#method.try_acquire), but takes an [`Arc`] `<Semaphore>`, and returns a guard that is `'static`,
/// [`Send`] and [`Sync`].
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, TryAcquireError, SemaphoreGuardArc};
/// # use std::sync::Arc;
/// use async_channel::{Sender, TrySendError};
/// // Limit size of a producer-consumer queue
/// async fn try_send<T>(semaphore: &Arc<Semaphore>,
/// sender: &Sender<(SemaphoreGuardArc, T)>,
/// message: T
/// ) -> Result<(), TrySendError<T>>{
/// match semaphore.try_acquire_arc(1) {
/// Err(TryAcquireError::WouldBlock) => Err(TrySendError::Full(message)),
/// // A semaphore can be poisoned to prevent deadlock when a channel closes.
/// Err(TryAcquireError::Poisoned) => Err(TrySendError::Closed(message)),
/// Ok(guard) => match sender.try_send((guard, message)) {
/// Err(TrySendError::Closed((guard, message))) => Err(TrySendError::Closed(message)),
/// Err(TrySendError::Full((guard, message))) => Err(TrySendError::Full(message)),
/// Ok(()) => Ok(())
/// }
/// }
/// }
/// ```
pub fn try_acquire_arc(self: &Arc<Self>, amount: usize) -> Result<SemaphoreGuardArc, TryAcquireError> {
let guard = self.try_acquire(amount)?;
let result = SemaphoreGuardArc::new(self.clone(), amount);
guard.forget();
Ok(result)
}
/// Return `amount` permits to the semaphore. This will eventually wake any calls to [acquire](#method.acquire)
/// that can succeed with the additional permits. Calling `release` often makes sense after calling
/// [`SemaphoreGuard::forget`] or when using the semaphore to signal the number of elements that
/// are available for processing.
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, TryAcquireError};
/// use async_channel::{Receiver, RecvError};
/// // Limit size of a producer-consumer queue
/// async fn recv<T>(semaphore: &Semaphore, recv: &Receiver<T>) -> Result<T, RecvError>{
/// let result = recv.recv().await?;
/// // Note that this only guards elements in the queue, not those being processed after the
/// // queue.
/// semaphore.release(1);
/// Ok(result)
/// }
/// ```
pub fn release(&self, amount: usize) {
unsafe {
ReleaseAction { sem: self, releasable: Permits::new(amount) }.release();
}
}
/// Poison the semaphore, causing all pending and future calls to `acquire` to fail immediately.
/// This can be used to unblock pending acquires when the guarded operation would fail anyway.
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, TryAcquireError};
/// # use std::sync::Arc;
/// # use async_std::sync::Mutex;
/// use async_channel::{Receiver, RecvError};
/// async fn consume(semaphore: &Semaphore, receiver: Receiver<usize>){
/// while let Ok(x) = receiver.recv().await {
/// println!("{:?}", x);
/// semaphore.release(1);
/// }
/// // There will be no more calls to recv, so unblock all senders.
/// semaphore.poison();
/// }
/// ```
pub fn poison(&self) {
unsafe {
ReleaseAction { sem: self, releasable: Permits::poison() }.release();
}
}
}
| {
match self.acquire.load(Relaxed) {
Available(available) => write!(f, "Semaphore::Ready({:?})", available)?,
Queued(_) => match self.release.load(Relaxed) {
Unlocked(available) => write!(f, "Semaphore::Blocked({:?})", available)?,
_ => write!(f, "Semaphore::Unknown")?,
},
};
Ok(())
} | identifier_body |
semaphore.rs | use std::{fmt, mem};
use std::panic::{RefUnwindSafe, UnwindSafe};
use std::sync::atomic::Ordering::{Relaxed, Acquire};
use crate::state::ReleaseState::Unlocked;
use crate::state::AcquireState::{Available, Queued};
use std::fmt::{Debug, Formatter};
use crate::state::{AcquireStep, Waiter, Permits, AcquireState, ReleaseState};
use std::cell::UnsafeCell;
use crate::{AcquireFuture, TryAcquireError, SemaphoreGuard, AcquireFutureArc, SemaphoreGuardArc};
use std::marker::{PhantomPinned, PhantomData};
use crate::waker::AtomicWaker;
use std::ptr::null;
use std::sync::Arc;
use crate::atomic::Atomic;
use std::mem::size_of;
use crate::release::ReleaseAction;
#[allow(unused_imports)] // used by docs
use crate::errors::PoisonError;
/// An async weighted semaphore. See [crate documentation](index.html) for usage.
// This implementation encodes state (the available counter, acquire queue, and cancel queue) into
// multiple atomic variables and linked lists. Concurrent acquires (and concurrent cancels) synchronize
// by pushing onto a stack with an atomic swap. Releases synchronize with other operations by attempting
// to acquire a lock. If the lock is successfully acquired, the release can proceed. Otherwise
// the lock is marked dirty to indicate that there is additional work for the lock owner to do.
pub struct Semaphore {
// The number of available permits or the back of the queue (without next edges).
pub(crate) acquire: Atomic<AcquireState>,
// A number of releasable permits, and the state of the current release lock.
pub(crate) release: Atomic<ReleaseState>,
// The front of the queue (with next edges).
pub(crate) front: UnsafeCell<*const Waiter>,
// The last node swapped from AcquireState (with next edges).
pub(crate) middle: UnsafeCell<*const Waiter>,
// A stack of nodes that are cancelling.
pub(crate) next_cancel: Atomic<*const Waiter>,
}
unsafe impl Sync for Semaphore {}
unsafe impl Send for Semaphore {}
impl UnwindSafe for Semaphore {}
impl RefUnwindSafe for Semaphore {}
impl Debug for Semaphore {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self.acquire.load(Relaxed) {
Available(available) => write!(f, "Semaphore::Ready({:?})", available)?,
Queued(_) => match self.release.load(Relaxed) {
Unlocked(available) => write!(f, "Semaphore::Blocked({:?})", available)?,
_ => write!(f, "Semaphore::Unknown")?,
},
};
Ok(())
}
}
impl Semaphore {
/// The maximum number of permits that can be made available. This is slightly smaller than
/// [`usize::MAX`]. If the number of available permits exceeds this number, it may poison the
/// semaphore.
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, SemaphoreGuard};
/// struct ReadWriteLock(Semaphore);
/// impl ReadWriteLock {
/// fn new() -> Self {
/// ReadWriteLock(Semaphore::new(Semaphore::MAX_AVAILABLE))
/// }
/// // Acquire one permit, allowing up to MAX_AVAILABLE concurrent readers.
/// async fn read(&self) -> SemaphoreGuard<'_> {
/// self.0.acquire(1).await.unwrap()
/// }
/// // The writer acquires all the permits, prevent any concurrent writers or readers. The
/// // first-in-first-out priority policy prevents writer starvation.
/// async fn write(&self) -> SemaphoreGuard<'_> {
/// self.0.acquire(Semaphore::MAX_AVAILABLE).await.unwrap()
/// }
/// }
/// ```
pub const MAX_AVAILABLE: usize = (1 << (size_of::<usize>() * 8 - 3)) - 1;
/// Create a new semaphore with an initial number of permits.
/// # Examples
/// ```
/// use async_weighted_semaphore::Semaphore;
/// let semaphore = Semaphore::new(1024);
/// ```
pub fn new(initial: usize) -> Self {
Semaphore {
acquire: Atomic::new(Available(Permits::new(initial))),
release: Atomic::new(Unlocked(Permits::new(0))),
front: UnsafeCell::new(null()),
middle: UnsafeCell::new(null()),
next_cancel: Atomic::new(null()),
}
}
/// Wait until there are no older pending calls to [acquire](#method.acquire) and at least `amount` permits available.
/// Then consume the requested permits and return a [`SemaphoreGuard`].
/// # Errors
/// Returns [`PoisonError`] is the semaphore is poisoned.
/// # Examples
/// ```
/// # use futures::executor::block_on;
/// # use std::future::Future;
/// use async_weighted_semaphore::Semaphore;
/// async fn limit_concurrency(semaphore: &Semaphore, future: impl Future<Output=()>) {
/// let guard = semaphore.acquire(1).await.unwrap();
/// future.await
/// }
/// ```
pub fn acquire(&self, amount: usize) -> AcquireFuture {
AcquireFuture(UnsafeCell::new(Waiter {
semaphore: self,
step: UnsafeCell::new(AcquireStep::Entering),
waker: unsafe { AtomicWaker::new() },
amount,
next: UnsafeCell::new(null()),
prev: UnsafeCell::new(null()),
next_cancel: UnsafeCell::new(null()),
}), PhantomData, PhantomPinned)
}
/// Like [acquire](#method.acquire), but fails if the call would block.
/// # Errors
/// * Returns [`TryAcquireError::Poisoned`] is the semaphore is poisoned.
/// * Returns [`TryAcquireError::WouldBlock`] if a call to `acquire` would have blocked. This can
/// occur if there are insufficient available permits or if there is another pending call to acquire.
/// # Examples
/// ```
/// # use futures::executor::block_on;
/// # use std::future::Future;
/// use async_weighted_semaphore::Semaphore;
/// async fn run_if_safe(semaphore: &Semaphore, future: impl Future<Output=()>) {
/// if semaphore.try_acquire(1).is_ok() {
/// future.await
/// }
/// }
/// ```
pub fn try_acquire(&self, amount: usize) -> Result<SemaphoreGuard, TryAcquireError> {
let mut current = self.acquire.load(Acquire);
loop {
match current {
Queued(_) => return Err(TryAcquireError::WouldBlock),
Available(available) => {
let available = available.into_usize().ok_or(TryAcquireError::Poisoned)?;
if available < amount {
return Err(TryAcquireError::WouldBlock);
}
if self.acquire.cmpxchg_weak_acqrel(&mut current, Available(Permits::new(available - amount))) {
return Ok(SemaphoreGuard::new(self, amount));
}
}
}
}
}
/// Like [acquire](#method.acquire), but takes an [`Arc`] `<Semaphore>` and returns a guard that is `'static`, [`Send`] and [`Sync`].
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, PoisonError, SemaphoreGuardArc};
/// # use std::sync::Arc;
/// use async_channel::{Sender, SendError};
/// // Limit size of a producer-consumer queue
/// async fn send<T>(semaphore: &Arc<Semaphore>,
/// sender: &Sender<(SemaphoreGuardArc, T)>,
/// message: T
/// ) -> Result<(), SendError<T>>{
/// match semaphore.acquire_arc(1).await {
/// // A semaphore can be poisoned to prevent deadlock when a channel closes.
/// Err(PoisonError) => Err(SendError(message)),
/// Ok(guard) => match sender.send((guard, message)).await{
/// Err(SendError((guard, message))) => Err(SendError(message)),
/// Ok(()) => Ok(())
/// }
/// }
/// } | pub fn acquire_arc(self: &Arc<Self>, amount: usize) -> AcquireFutureArc {
AcquireFutureArc {
arc: self.clone(),
inner: unsafe { mem::transmute::<AcquireFuture, AcquireFuture>(self.acquire(amount)) },
}
}
/// Like [try_acquire](#method.try_acquire), but takes an [`Arc`] `<Semaphore>`, and returns a guard that is `'static`,
/// [`Send`] and [`Sync`].
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, TryAcquireError, SemaphoreGuardArc};
/// # use std::sync::Arc;
/// use async_channel::{Sender, TrySendError};
/// // Limit size of a producer-consumer queue
/// async fn try_send<T>(semaphore: &Arc<Semaphore>,
/// sender: &Sender<(SemaphoreGuardArc, T)>,
/// message: T
/// ) -> Result<(), TrySendError<T>>{
/// match semaphore.try_acquire_arc(1) {
/// Err(TryAcquireError::WouldBlock) => Err(TrySendError::Full(message)),
/// // A semaphore can be poisoned to prevent deadlock when a channel closes.
/// Err(TryAcquireError::Poisoned) => Err(TrySendError::Closed(message)),
/// Ok(guard) => match sender.try_send((guard, message)) {
/// Err(TrySendError::Closed((guard, message))) => Err(TrySendError::Closed(message)),
/// Err(TrySendError::Full((guard, message))) => Err(TrySendError::Full(message)),
/// Ok(()) => Ok(())
/// }
/// }
/// }
/// ```
pub fn try_acquire_arc(self: &Arc<Self>, amount: usize) -> Result<SemaphoreGuardArc, TryAcquireError> {
let guard = self.try_acquire(amount)?;
let result = SemaphoreGuardArc::new(self.clone(), amount);
guard.forget();
Ok(result)
}
/// Return `amount` permits to the semaphore. This will eventually wake any calls to [acquire](#method.acquire)
/// that can succeed with the additional permits. Calling `release` often makes sense after calling
/// [`SemaphoreGuard::forget`] or when using the semaphore to signal the number of elements that
/// are available for processing.
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, TryAcquireError};
/// use async_channel::{Receiver, RecvError};
/// // Limit size of a producer-consumer queue
/// async fn recv<T>(semaphore: &Semaphore, recv: &Receiver<T>) -> Result<T, RecvError>{
/// let result = recv.recv().await?;
/// // Note that this only guards elements in the queue, not those being processed after the
/// // queue.
/// semaphore.release(1);
/// Ok(result)
/// }
/// ```
pub fn release(&self, amount: usize) {
unsafe {
ReleaseAction { sem: self, releasable: Permits::new(amount) }.release();
}
}
/// Poison the semaphore, causing all pending and future calls to `acquire` to fail immediately.
/// This can be used to unblock pending acquires when the guarded operation would fail anyway.
/// # Examples
/// ```
/// # use async_weighted_semaphore::{Semaphore, TryAcquireError};
/// # use std::sync::Arc;
/// # use async_std::sync::Mutex;
/// use async_channel::{Receiver, RecvError};
/// async fn consume(semaphore: &Semaphore, receiver: Receiver<usize>){
/// while let Ok(x) = receiver.recv().await {
/// println!("{:?}", x);
/// semaphore.release(1);
/// }
/// // There will be no more calls to recv, so unblock all senders.
/// semaphore.poison();
/// }
/// ```
pub fn poison(&self) {
unsafe {
ReleaseAction { sem: self, releasable: Permits::poison() }.release();
}
}
} | /// ``` | random_line_split |
14.rs | // --- Day 14: Disk Defragmentation ---
// Suddenly, a scheduled job activates the system's disk defragmenter. Were the situation different, you might sit and watch it for a while, but today, you just don't have that kind of time. It's soaking up valuable system resources that are needed elsewhere, and so the only option is to help it finish its task as soon as possible.
// The disk in question consists of a 128x128 grid; each square of the grid is either free or used. On this disk, the state of the grid is tracked by the bits in a sequence of knot hashes.
// A total of 128 knot hashes are calculated, each corresponding to a single row in the grid; each hash contains 128 bits which correspond to individual grid squares. Each bit of a hash indicates whether that square is free (0) or used (1).
// The hash inputs are a key string (your puzzle input), a dash, and a number from 0 to 127 corresponding to the row. For example, if your key string were flqrgnkx, then the first row would be given by the bits of the knot hash of flqrgnkx-0, the second row from the bits of the knot hash of flqrgnkx-1, and so on until the last row, flqrgnkx-127.
// The output of a knot hash is traditionally represented by 32 hexadecimal digits; each of these digits correspond to 4 bits, for a total of 4 * 32 = 128 bits. To convert to bits, turn each hexadecimal digit to its equivalent binary value, high-bit first: 0 becomes 0000, 1 becomes 0001, e becomes 1110, f becomes 1111, and so on; a hash that begins with a0c2017... in hexadecimal would begin with 10100000110000100000000101110000... in binary.
// Continuing this process, the first 8 rows and columns for key flqrgnkx appear as follows, using # to denote used squares, and . to denote free ones:
// ##.#.#..-->
// .#.#.#.#
// ....#.#.
// #.#.##.#
// .##.#...
// ##..#..#
// .#...#..
// ##.#.##.-->
// | |
// V V
// In this example, 8108 squares are used across the entire 128x128 grid.
// Given your actual key string, how many squares are used?
// Your puzzle input is jxqlasbh.
#![feature(conservative_impl_trait)]
#![feature(entry_and_modify)]
// #![feature(nll)]
extern crate advent2017;
use advent2017::knot::{Knot};
use std::io::Cursor;
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Occupied, Vacant};
/// Given any Binary, return an iterator that iterates through the binary
/// representation of the type (msb first), and returns true whenever the bit is set.
fn num_to_bits<T: std::fmt::Binary>(num: T) -> Vec<bool> {
let s = format!("{:04b}", num);
s.chars()
.map(|c| c == '1')
.collect::<Vec<bool>>()
}
/// Given a string representing a hexadecimal number,
/// where each character of the string is a hexadecimal digit representing 4 binary bits,
/// return a bitfield of the unsigned binary representation of that number,
/// msb at index 0
fn hex_to_bits<'a>(hex: &'a str) -> Vec<bool> {
(0..hex.len())
.map(|i| &hex[i..i+1])
.map(|slice| u8::from_str_radix(slice, 16).unwrap())
.flat_map(|num| num_to_bits(num))
.collect::<Vec<bool>>()
}
fn hashes(seed: &str) -> Vec<String> {
(0..128)
.map(|i| format!("{}-{}", seed, i))
.map(|plaintext| {
let mut knot = Knot::new();
knot.hash(Cursor::new(plaintext))
})
.collect()
}
fn bitcount_hash(hash: &str) -> u32 {
let mut bitsum = 0;
for j in 0..32 {
let slice = &hash[j..j+1];
let num = u32::from_str_radix(slice, 16).unwrap();
bitsum += num.count_ones();
}
bitsum
}
fn count_hash_seed(s: &str) -> u32 {
let mut bitsum = 0;
for hash in hashes(&s) {
bitsum += bitcount_hash(&hash);
}
bitsum
}
fn part_one() {
let input = "jxqlasbh";
println!("{}: {}", input, count_hash_seed(&input));
}
// --- Part Two ---
// Now, all the defragmenter needs to know is the number of regions. A region is a group of used squares that are all adjacent, not including diagonals. Every used square is in exactly one region: lone used squares form their own isolated regions, while several adjacent squares all count as a single region.
// In the example above, the following nine regions are visible, each marked with a distinct digit:
// 11.2.3..-->
// .1.2.3.4
// ....5.6.
// 7.8.55.9
// .88.5...
// 88..5..8
// .8...8..
// 88.8.88.-->
// | |
// V V
// Of particular interest is the region marked 8; while it does not appear contiguous in this small view, all of the squares marked 8 are connected when considering the whole 128x128 grid. In total, in this example, 1242 regions are present.
// How many regions are present given your key string?
fn make_grid(hash_seed: &str) -> Vec<Vec<bool>> {
let mut grid = Vec::with_capacity(128);
for hash in hashes(hash_seed) {
grid.push(hex_to_bits(&hash));
}
grid
}
/// make a single scan through the grid
// At each position, if the cell is filled, look in each cardinal direction for adjacent clusters
// If at least one is found, merge this element and all clusters that it is touching into the
// cluster with the lowest id that was found.
// If none are found, then start a new cluster on this cell.
type ClusterId = i32;
#[derive(Debug)]
struct Loc(usize, usize);
type CGrid = Vec<Vec<CellState>>;
type CMap = HashMap<ClusterId, Vec<Loc>>;
#[derive(PartialEq, Eq, Debug, Clone)]
enum CellState {
Unclaimed,
Empty,
Id(ClusterId)
}
struct Clusters {
grid: CGrid,
index: CMap,
next_id: ClusterId
}
impl Clusters {
fn new(size: u32) -> Self {
let mut grid : Vec<Vec<CellState>> = Vec::new();
for _ in 0..size {
let mut row = vec![];
for _ in 0..size {
row.push(CellState::Unclaimed);
}
grid.push(row);
}
Clusters { grid, index: HashMap::new(), next_id: 0 }
}
fn print_small(&self, window_size: usize) {
for row in self.grid.iter().take(window_size) {
println!("\n{}", row.iter().take(window_size).map(|c| match c {
&CellState::Id(id) => format!("{:4}", id),
&CellState::Empty => " .".to_string(),
&CellState::Unclaimed => " ?".to_string()
})
.collect::<Vec<String>>()
.join(" "));
}
}
fn add_grid(&mut self, &Loc(i, j): &Loc, id: ClusterId) {
self.grid[i][j] = CellState::Id(id);
}
fn new_cluster(&mut self, loc: Loc) {
let id = self.next_id;
self.next_id += 1;
self.add_to_cluster(loc, id);
}
fn add_to_cluster(&mut self, loc: Loc, id: ClusterId) {
self.add_grid(&loc, id);
match self.index.entry(id) {
Occupied(mut e) => { e.get_mut().push(loc); }
Vacant(e) => { e.insert(vec![loc]); }
}
}
fn set_empty(&mut self, Loc(i, j): Loc) {
self.grid[i][j] = CellState::Empty;
}
fn state(&self, &Loc(i, j): &Loc) -> CellState {
self.grid[i][j].clone()
}
fn merge_clusters(&mut self, dest: ClusterId, other: &ClusterId) {
if dest == *other {
return;
}
if let Some(mut locs) = self.index.remove(&other) {
for loc in locs.iter() {
self.add_grid(&loc, dest);
}
self.index.entry(dest)
.and_modify(|f| f.append(&mut locs))
.or_insert_with(|| locs );
}
}
}
fn | (size: u32, occupied: &Vec<Vec<bool>>) {
for row in occupied.iter().take(size as usize) {
println!("\n{}", row.iter().take(size as usize).map(|c| match c {
&true => "#",
&false => ".",
})
.collect::<Vec<&str>>()
.join(" "));
}
}
/*
This algorithm makes one pass through the grid, left to right, top to bottom.
At each cell, if the cell is occupied, it checks all neighboring cells for any that
belong to a cluster. Then current cell and all of its cluster neighbors are merged into
the lowest-id cluster that it finds.
If the cell is occupied but has no neighbors that belong to cells, a new cluster is started.
*/
fn count_clusters(occupied: &Vec<Vec<bool>>) -> u32 {
let size = 128;
let mut clusters = Clusters::new(size);
let len = clusters.grid.len();
// print_small_grid(10, &occupied);
for i in 0..len {
let jlen = clusters.grid[i].len();
for j in 0..jlen {
let val = clusters.state(&Loc(i, j));
if occupied[i][j] {
let mut adj_clusters = vec![];
for o in [-1, 1].iter() {
let it = (i as i64) + *o;
let jt = (j as i64) + *o;
if it >= 0 && it < len as i64 {
let loc = Loc(it as usize, j);
if let CellState::Id(id) = clusters.state(&loc) {
adj_clusters.push(id);
}
}
if jt >= 0 && jt < jlen as i64 {
let loc = Loc(i, jt as usize);
if let CellState::Id(id) = clusters.state(&loc) {
adj_clusters.push(id);
}
}
}
if adj_clusters.len() > 0 {
let min = adj_clusters.iter().clone().min().unwrap();
for id in adj_clusters.iter() {
clusters.merge_clusters(*min, &id);
}
clusters.add_to_cluster(Loc(i, j), *min);
} else {
clusters.new_cluster(Loc(i, j));
}
}
else {
clusters.set_empty(Loc(i, j))
}
}
}
// clusters.print_small(10);
clusters.index.keys().len() as u32
}
fn part_two() {
let grid = make_grid("jxqlasbh");
let count = count_clusters(&grid);
println!("14-2: {} clusters in {}", count, "jxqlasbh");
}
fn main() {
part_one();
part_two();
}
#[cfg(test)]
mod tests {
use count_hash_seed;
use hex_to_bits;
use count_clusters;
use make_grid;
#[test]
fn test_count_clusters() {
assert_eq!(count_clusters(&make_grid("flqrgnkx")), 1242);
}
#[test]
fn test_count_hash_seed() {
assert_eq!(count_hash_seed("flqrgnkx"), 8108);
}
#[test]
fn test_hex_to_bits() {
for (expected_value, letter) in ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f"].iter().enumerate() {
let actual = hex_to_bits(letter);
let actual_binary_string = actual
.iter()
.map(|b| if *b { '1' } else { '0' }).collect::<String>();
let actual_value = u8::from_str_radix(&actual_binary_string, 2).unwrap();
assert_eq!(actual_value, expected_value as u8);
}
}
}
| print_small_grid | identifier_name |
14.rs | // --- Day 14: Disk Defragmentation ---
// Suddenly, a scheduled job activates the system's disk defragmenter. Were the situation different, you might sit and watch it for a while, but today, you just don't have that kind of time. It's soaking up valuable system resources that are needed elsewhere, and so the only option is to help it finish its task as soon as possible.
// The disk in question consists of a 128x128 grid; each square of the grid is either free or used. On this disk, the state of the grid is tracked by the bits in a sequence of knot hashes.
// A total of 128 knot hashes are calculated, each corresponding to a single row in the grid; each hash contains 128 bits which correspond to individual grid squares. Each bit of a hash indicates whether that square is free (0) or used (1).
// The hash inputs are a key string (your puzzle input), a dash, and a number from 0 to 127 corresponding to the row. For example, if your key string were flqrgnkx, then the first row would be given by the bits of the knot hash of flqrgnkx-0, the second row from the bits of the knot hash of flqrgnkx-1, and so on until the last row, flqrgnkx-127.
// The output of a knot hash is traditionally represented by 32 hexadecimal digits; each of these digits correspond to 4 bits, for a total of 4 * 32 = 128 bits. To convert to bits, turn each hexadecimal digit to its equivalent binary value, high-bit first: 0 becomes 0000, 1 becomes 0001, e becomes 1110, f becomes 1111, and so on; a hash that begins with a0c2017... in hexadecimal would begin with 10100000110000100000000101110000... in binary.
// Continuing this process, the first 8 rows and columns for key flqrgnkx appear as follows, using # to denote used squares, and . to denote free ones:
// ##.#.#..-->
// .#.#.#.#
// ....#.#.
// #.#.##.#
// .##.#...
// ##..#..#
// .#...#..
// ##.#.##.-->
// | |
// V V
// In this example, 8108 squares are used across the entire 128x128 grid.
// Given your actual key string, how many squares are used?
// Your puzzle input is jxqlasbh.
#![feature(conservative_impl_trait)]
#![feature(entry_and_modify)]
// #![feature(nll)]
extern crate advent2017;
use advent2017::knot::{Knot};
use std::io::Cursor;
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Occupied, Vacant};
/// Given any Binary, return an iterator that iterates through the binary
/// representation of the type (msb first), and returns true whenever the bit is set.
fn num_to_bits<T: std::fmt::Binary>(num: T) -> Vec<bool> {
let s = format!("{:04b}", num);
s.chars()
.map(|c| c == '1')
.collect::<Vec<bool>>()
}
/// Given a string representing a hexadecimal number,
/// where each character of the string is a hexadecimal digit representing 4 binary bits,
/// return a bitfield of the unsigned binary representation of that number,
/// msb at index 0
fn hex_to_bits<'a>(hex: &'a str) -> Vec<bool> {
(0..hex.len())
.map(|i| &hex[i..i+1])
.map(|slice| u8::from_str_radix(slice, 16).unwrap())
.flat_map(|num| num_to_bits(num))
.collect::<Vec<bool>>()
}
fn hashes(seed: &str) -> Vec<String> {
(0..128)
.map(|i| format!("{}-{}", seed, i))
.map(|plaintext| {
let mut knot = Knot::new();
knot.hash(Cursor::new(plaintext))
})
.collect()
}
fn bitcount_hash(hash: &str) -> u32 {
let mut bitsum = 0;
for j in 0..32 {
let slice = &hash[j..j+1];
let num = u32::from_str_radix(slice, 16).unwrap();
bitsum += num.count_ones();
}
bitsum
}
fn count_hash_seed(s: &str) -> u32 {
let mut bitsum = 0;
for hash in hashes(&s) {
bitsum += bitcount_hash(&hash);
}
bitsum
}
fn part_one() {
let input = "jxqlasbh";
println!("{}: {}", input, count_hash_seed(&input));
}
// --- Part Two ---
// Now, all the defragmenter needs to know is the number of regions. A region is a group of used squares that are all adjacent, not including diagonals. Every used square is in exactly one region: lone used squares form their own isolated regions, while several adjacent squares all count as a single region.
// In the example above, the following nine regions are visible, each marked with a distinct digit:
// 11.2.3..-->
// .1.2.3.4
// ....5.6.
// 7.8.55.9
// .88.5...
// 88..5..8
// .8...8..
// 88.8.88.-->
// | |
// V V
// Of particular interest is the region marked 8; while it does not appear contiguous in this small view, all of the squares marked 8 are connected when considering the whole 128x128 grid. In total, in this example, 1242 regions are present.
// How many regions are present given your key string?
fn make_grid(hash_seed: &str) -> Vec<Vec<bool>> {
let mut grid = Vec::with_capacity(128);
for hash in hashes(hash_seed) {
grid.push(hex_to_bits(&hash));
}
grid
}
/// make a single scan through the grid
// At each position, if the cell is filled, look in each cardinal direction for adjacent clusters
// If at least one is found, merge this element and all clusters that it is touching into the
// cluster with the lowest id that was found.
// If none are found, then start a new cluster on this cell.
type ClusterId = i32;
#[derive(Debug)]
struct Loc(usize, usize);
type CGrid = Vec<Vec<CellState>>;
type CMap = HashMap<ClusterId, Vec<Loc>>;
#[derive(PartialEq, Eq, Debug, Clone)]
enum CellState {
Unclaimed,
Empty,
Id(ClusterId)
}
struct Clusters {
grid: CGrid,
index: CMap,
next_id: ClusterId
}
impl Clusters {
fn new(size: u32) -> Self {
let mut grid : Vec<Vec<CellState>> = Vec::new();
for _ in 0..size {
let mut row = vec![];
for _ in 0..size {
row.push(CellState::Unclaimed);
}
grid.push(row);
}
Clusters { grid, index: HashMap::new(), next_id: 0 }
}
fn print_small(&self, window_size: usize) {
for row in self.grid.iter().take(window_size) {
println!("\n{}", row.iter().take(window_size).map(|c| match c {
&CellState::Id(id) => format!("{:4}", id),
&CellState::Empty => " .".to_string(),
&CellState::Unclaimed => " ?".to_string()
})
.collect::<Vec<String>>()
.join(" "));
}
}
fn add_grid(&mut self, &Loc(i, j): &Loc, id: ClusterId) {
self.grid[i][j] = CellState::Id(id);
}
fn new_cluster(&mut self, loc: Loc) {
let id = self.next_id;
self.next_id += 1;
self.add_to_cluster(loc, id);
}
fn add_to_cluster(&mut self, loc: Loc, id: ClusterId) {
self.add_grid(&loc, id);
match self.index.entry(id) {
Occupied(mut e) => { e.get_mut().push(loc); }
Vacant(e) => { e.insert(vec![loc]); }
}
}
fn set_empty(&mut self, Loc(i, j): Loc) {
self.grid[i][j] = CellState::Empty;
}
fn state(&self, &Loc(i, j): &Loc) -> CellState {
self.grid[i][j].clone()
}
fn merge_clusters(&mut self, dest: ClusterId, other: &ClusterId) {
if dest == *other {
return;
}
if let Some(mut locs) = self.index.remove(&other) {
for loc in locs.iter() {
self.add_grid(&loc, dest);
}
self.index.entry(dest)
.and_modify(|f| f.append(&mut locs))
.or_insert_with(|| locs );
}
}
}
fn print_small_grid(size: u32, occupied: &Vec<Vec<bool>>) {
for row in occupied.iter().take(size as usize) {
println!("\n{}", row.iter().take(size as usize).map(|c| match c {
&true => "#",
&false => ".",
})
.collect::<Vec<&str>>()
.join(" "));
}
}
/*
This algorithm makes one pass through the grid, left to right, top to bottom.
At each cell, if the cell is occupied, it checks all neighboring cells for any that
belong to a cluster. Then current cell and all of its cluster neighbors are merged into
the lowest-id cluster that it finds.
If the cell is occupied but has no neighbors that belong to cells, a new cluster is started.
*/
fn count_clusters(occupied: &Vec<Vec<bool>>) -> u32 {
let size = 128;
let mut clusters = Clusters::new(size);
let len = clusters.grid.len();
// print_small_grid(10, &occupied);
for i in 0..len {
let jlen = clusters.grid[i].len();
for j in 0..jlen {
let val = clusters.state(&Loc(i, j));
if occupied[i][j] {
let mut adj_clusters = vec![];
for o in [-1, 1].iter() {
let it = (i as i64) + *o;
let jt = (j as i64) + *o;
if it >= 0 && it < len as i64 {
let loc = Loc(it as usize, j);
if let CellState::Id(id) = clusters.state(&loc) {
adj_clusters.push(id);
}
}
if jt >= 0 && jt < jlen as i64 {
let loc = Loc(i, jt as usize);
if let CellState::Id(id) = clusters.state(&loc) {
adj_clusters.push(id);
}
}
}
if adj_clusters.len() > 0 {
let min = adj_clusters.iter().clone().min().unwrap();
for id in adj_clusters.iter() {
clusters.merge_clusters(*min, &id);
}
clusters.add_to_cluster(Loc(i, j), *min);
} else |
}
else {
clusters.set_empty(Loc(i, j))
}
}
}
// clusters.print_small(10);
clusters.index.keys().len() as u32
}
fn part_two() {
let grid = make_grid("jxqlasbh");
let count = count_clusters(&grid);
println!("14-2: {} clusters in {}", count, "jxqlasbh");
}
fn main() {
part_one();
part_two();
}
#[cfg(test)]
mod tests {
use count_hash_seed;
use hex_to_bits;
use count_clusters;
use make_grid;
#[test]
fn test_count_clusters() {
assert_eq!(count_clusters(&make_grid("flqrgnkx")), 1242);
}
#[test]
fn test_count_hash_seed() {
assert_eq!(count_hash_seed("flqrgnkx"), 8108);
}
#[test]
fn test_hex_to_bits() {
for (expected_value, letter) in ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f"].iter().enumerate() {
let actual = hex_to_bits(letter);
let actual_binary_string = actual
.iter()
.map(|b| if *b { '1' } else { '0' }).collect::<String>();
let actual_value = u8::from_str_radix(&actual_binary_string, 2).unwrap();
assert_eq!(actual_value, expected_value as u8);
}
}
}
| {
clusters.new_cluster(Loc(i, j));
} | conditional_block |
14.rs | // --- Day 14: Disk Defragmentation ---
// Suddenly, a scheduled job activates the system's disk defragmenter. Were the situation different, you might sit and watch it for a while, but today, you just don't have that kind of time. It's soaking up valuable system resources that are needed elsewhere, and so the only option is to help it finish its task as soon as possible.
// The disk in question consists of a 128x128 grid; each square of the grid is either free or used. On this disk, the state of the grid is tracked by the bits in a sequence of knot hashes.
// A total of 128 knot hashes are calculated, each corresponding to a single row in the grid; each hash contains 128 bits which correspond to individual grid squares. Each bit of a hash indicates whether that square is free (0) or used (1).
// The hash inputs are a key string (your puzzle input), a dash, and a number from 0 to 127 corresponding to the row. For example, if your key string were flqrgnkx, then the first row would be given by the bits of the knot hash of flqrgnkx-0, the second row from the bits of the knot hash of flqrgnkx-1, and so on until the last row, flqrgnkx-127.
// The output of a knot hash is traditionally represented by 32 hexadecimal digits; each of these digits correspond to 4 bits, for a total of 4 * 32 = 128 bits. To convert to bits, turn each hexadecimal digit to its equivalent binary value, high-bit first: 0 becomes 0000, 1 becomes 0001, e becomes 1110, f becomes 1111, and so on; a hash that begins with a0c2017... in hexadecimal would begin with 10100000110000100000000101110000... in binary.
// Continuing this process, the first 8 rows and columns for key flqrgnkx appear as follows, using # to denote used squares, and . to denote free ones:
// ##.#.#..-->
// .#.#.#.#
// ....#.#.
// #.#.##.#
// .##.#...
// ##..#..#
// .#...#..
// ##.#.##.-->
// | |
// V V
// In this example, 8108 squares are used across the entire 128x128 grid.
// Given your actual key string, how many squares are used?
// Your puzzle input is jxqlasbh.
#![feature(conservative_impl_trait)]
#![feature(entry_and_modify)]
// #![feature(nll)]
extern crate advent2017;
use advent2017::knot::{Knot};
use std::io::Cursor;
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Occupied, Vacant};
/// Given any Binary, return an iterator that iterates through the binary
/// representation of the type (msb first), and returns true whenever the bit is set.
fn num_to_bits<T: std::fmt::Binary>(num: T) -> Vec<bool> {
let s = format!("{:04b}", num);
s.chars()
.map(|c| c == '1')
.collect::<Vec<bool>>()
}
/// Given a string representing a hexadecimal number,
/// where each character of the string is a hexadecimal digit representing 4 binary bits,
/// return a bitfield of the unsigned binary representation of that number,
/// msb at index 0
fn hex_to_bits<'a>(hex: &'a str) -> Vec<bool> {
(0..hex.len())
.map(|i| &hex[i..i+1])
.map(|slice| u8::from_str_radix(slice, 16).unwrap())
.flat_map(|num| num_to_bits(num))
.collect::<Vec<bool>>()
}
fn hashes(seed: &str) -> Vec<String> |
fn bitcount_hash(hash: &str) -> u32 {
let mut bitsum = 0;
for j in 0..32 {
let slice = &hash[j..j+1];
let num = u32::from_str_radix(slice, 16).unwrap();
bitsum += num.count_ones();
}
bitsum
}
fn count_hash_seed(s: &str) -> u32 {
let mut bitsum = 0;
for hash in hashes(&s) {
bitsum += bitcount_hash(&hash);
}
bitsum
}
fn part_one() {
let input = "jxqlasbh";
println!("{}: {}", input, count_hash_seed(&input));
}
// --- Part Two ---
// Now, all the defragmenter needs to know is the number of regions. A region is a group of used squares that are all adjacent, not including diagonals. Every used square is in exactly one region: lone used squares form their own isolated regions, while several adjacent squares all count as a single region.
// In the example above, the following nine regions are visible, each marked with a distinct digit:
// 11.2.3..-->
// .1.2.3.4
// ....5.6.
// 7.8.55.9
// .88.5...
// 88..5..8
// .8...8..
// 88.8.88.-->
// | |
// V V
// Of particular interest is the region marked 8; while it does not appear contiguous in this small view, all of the squares marked 8 are connected when considering the whole 128x128 grid. In total, in this example, 1242 regions are present.
// How many regions are present given your key string?
fn make_grid(hash_seed: &str) -> Vec<Vec<bool>> {
let mut grid = Vec::with_capacity(128);
for hash in hashes(hash_seed) {
grid.push(hex_to_bits(&hash));
}
grid
}
/// make a single scan through the grid
// At each position, if the cell is filled, look in each cardinal direction for adjacent clusters
// If at least one is found, merge this element and all clusters that it is touching into the
// cluster with the lowest id that was found.
// If none are found, then start a new cluster on this cell.
type ClusterId = i32;
#[derive(Debug)]
struct Loc(usize, usize);
type CGrid = Vec<Vec<CellState>>;
type CMap = HashMap<ClusterId, Vec<Loc>>;
#[derive(PartialEq, Eq, Debug, Clone)]
enum CellState {
Unclaimed,
Empty,
Id(ClusterId)
}
struct Clusters {
grid: CGrid,
index: CMap,
next_id: ClusterId
}
impl Clusters {
fn new(size: u32) -> Self {
let mut grid : Vec<Vec<CellState>> = Vec::new();
for _ in 0..size {
let mut row = vec![];
for _ in 0..size {
row.push(CellState::Unclaimed);
}
grid.push(row);
}
Clusters { grid, index: HashMap::new(), next_id: 0 }
}
fn print_small(&self, window_size: usize) {
for row in self.grid.iter().take(window_size) {
println!("\n{}", row.iter().take(window_size).map(|c| match c {
&CellState::Id(id) => format!("{:4}", id),
&CellState::Empty => " .".to_string(),
&CellState::Unclaimed => " ?".to_string()
})
.collect::<Vec<String>>()
.join(" "));
}
}
fn add_grid(&mut self, &Loc(i, j): &Loc, id: ClusterId) {
self.grid[i][j] = CellState::Id(id);
}
fn new_cluster(&mut self, loc: Loc) {
let id = self.next_id;
self.next_id += 1;
self.add_to_cluster(loc, id);
}
fn add_to_cluster(&mut self, loc: Loc, id: ClusterId) {
self.add_grid(&loc, id);
match self.index.entry(id) {
Occupied(mut e) => { e.get_mut().push(loc); }
Vacant(e) => { e.insert(vec![loc]); }
}
}
fn set_empty(&mut self, Loc(i, j): Loc) {
self.grid[i][j] = CellState::Empty;
}
fn state(&self, &Loc(i, j): &Loc) -> CellState {
self.grid[i][j].clone()
}
fn merge_clusters(&mut self, dest: ClusterId, other: &ClusterId) {
if dest == *other {
return;
}
if let Some(mut locs) = self.index.remove(&other) {
for loc in locs.iter() {
self.add_grid(&loc, dest);
}
self.index.entry(dest)
.and_modify(|f| f.append(&mut locs))
.or_insert_with(|| locs );
}
}
}
fn print_small_grid(size: u32, occupied: &Vec<Vec<bool>>) {
for row in occupied.iter().take(size as usize) {
println!("\n{}", row.iter().take(size as usize).map(|c| match c {
&true => "#",
&false => ".",
})
.collect::<Vec<&str>>()
.join(" "));
}
}
/*
This algorithm makes one pass through the grid, left to right, top to bottom.
At each cell, if the cell is occupied, it checks all neighboring cells for any that
belong to a cluster. Then current cell and all of its cluster neighbors are merged into
the lowest-id cluster that it finds.
If the cell is occupied but has no neighbors that belong to cells, a new cluster is started.
*/
fn count_clusters(occupied: &Vec<Vec<bool>>) -> u32 {
let size = 128;
let mut clusters = Clusters::new(size);
let len = clusters.grid.len();
// print_small_grid(10, &occupied);
for i in 0..len {
let jlen = clusters.grid[i].len();
for j in 0..jlen {
let val = clusters.state(&Loc(i, j));
if occupied[i][j] {
let mut adj_clusters = vec![];
for o in [-1, 1].iter() {
let it = (i as i64) + *o;
let jt = (j as i64) + *o;
if it >= 0 && it < len as i64 {
let loc = Loc(it as usize, j);
if let CellState::Id(id) = clusters.state(&loc) {
adj_clusters.push(id);
}
}
if jt >= 0 && jt < jlen as i64 {
let loc = Loc(i, jt as usize);
if let CellState::Id(id) = clusters.state(&loc) {
adj_clusters.push(id);
}
}
}
if adj_clusters.len() > 0 {
let min = adj_clusters.iter().clone().min().unwrap();
for id in adj_clusters.iter() {
clusters.merge_clusters(*min, &id);
}
clusters.add_to_cluster(Loc(i, j), *min);
} else {
clusters.new_cluster(Loc(i, j));
}
}
else {
clusters.set_empty(Loc(i, j))
}
}
}
// clusters.print_small(10);
clusters.index.keys().len() as u32
}
fn part_two() {
let grid = make_grid("jxqlasbh");
let count = count_clusters(&grid);
println!("14-2: {} clusters in {}", count, "jxqlasbh");
}
fn main() {
part_one();
part_two();
}
#[cfg(test)]
mod tests {
use count_hash_seed;
use hex_to_bits;
use count_clusters;
use make_grid;
#[test]
fn test_count_clusters() {
assert_eq!(count_clusters(&make_grid("flqrgnkx")), 1242);
}
#[test]
fn test_count_hash_seed() {
assert_eq!(count_hash_seed("flqrgnkx"), 8108);
}
#[test]
fn test_hex_to_bits() {
for (expected_value, letter) in ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f"].iter().enumerate() {
let actual = hex_to_bits(letter);
let actual_binary_string = actual
.iter()
.map(|b| if *b { '1' } else { '0' }).collect::<String>();
let actual_value = u8::from_str_radix(&actual_binary_string, 2).unwrap();
assert_eq!(actual_value, expected_value as u8);
}
}
}
| {
(0..128)
.map(|i| format!("{}-{}", seed, i))
.map(|plaintext| {
let mut knot = Knot::new();
knot.hash(Cursor::new(plaintext))
})
.collect()
} | identifier_body |
14.rs | // --- Day 14: Disk Defragmentation ---
// Suddenly, a scheduled job activates the system's disk defragmenter. Were the situation different, you might sit and watch it for a while, but today, you just don't have that kind of time. It's soaking up valuable system resources that are needed elsewhere, and so the only option is to help it finish its task as soon as possible.
// The disk in question consists of a 128x128 grid; each square of the grid is either free or used. On this disk, the state of the grid is tracked by the bits in a sequence of knot hashes.
// A total of 128 knot hashes are calculated, each corresponding to a single row in the grid; each hash contains 128 bits which correspond to individual grid squares. Each bit of a hash indicates whether that square is free (0) or used (1).
// The hash inputs are a key string (your puzzle input), a dash, and a number from 0 to 127 corresponding to the row. For example, if your key string were flqrgnkx, then the first row would be given by the bits of the knot hash of flqrgnkx-0, the second row from the bits of the knot hash of flqrgnkx-1, and so on until the last row, flqrgnkx-127.
// The output of a knot hash is traditionally represented by 32 hexadecimal digits; each of these digits correspond to 4 bits, for a total of 4 * 32 = 128 bits. To convert to bits, turn each hexadecimal digit to its equivalent binary value, high-bit first: 0 becomes 0000, 1 becomes 0001, e becomes 1110, f becomes 1111, and so on; a hash that begins with a0c2017... in hexadecimal would begin with 10100000110000100000000101110000... in binary.
// Continuing this process, the first 8 rows and columns for key flqrgnkx appear as follows, using # to denote used squares, and . to denote free ones:
// ##.#.#..-->
// .#.#.#.#
// ....#.#.
// #.#.##.#
// .##.#...
// ##..#..#
// .#...#..
// ##.#.##.-->
// | |
// V V
// In this example, 8108 squares are used across the entire 128x128 grid.
// Given your actual key string, how many squares are used?
// Your puzzle input is jxqlasbh.
#![feature(conservative_impl_trait)]
#![feature(entry_and_modify)]
// #![feature(nll)]
extern crate advent2017;
use advent2017::knot::{Knot};
use std::io::Cursor;
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Occupied, Vacant};
/// Given any Binary, return an iterator that iterates through the binary
/// representation of the type (msb first), and returns true whenever the bit is set.
fn num_to_bits<T: std::fmt::Binary>(num: T) -> Vec<bool> {
let s = format!("{:04b}", num);
s.chars()
.map(|c| c == '1')
.collect::<Vec<bool>>()
}
/// Given a string representing a hexadecimal number,
/// where each character of the string is a hexadecimal digit representing 4 binary bits,
/// return a bitfield of the unsigned binary representation of that number,
/// msb at index 0
fn hex_to_bits<'a>(hex: &'a str) -> Vec<bool> {
(0..hex.len())
.map(|i| &hex[i..i+1])
.map(|slice| u8::from_str_radix(slice, 16).unwrap())
.flat_map(|num| num_to_bits(num))
.collect::<Vec<bool>>()
}
fn hashes(seed: &str) -> Vec<String> {
(0..128)
.map(|i| format!("{}-{}", seed, i))
.map(|plaintext| {
let mut knot = Knot::new();
knot.hash(Cursor::new(plaintext))
})
.collect()
}
fn bitcount_hash(hash: &str) -> u32 {
let mut bitsum = 0;
for j in 0..32 {
let slice = &hash[j..j+1];
let num = u32::from_str_radix(slice, 16).unwrap();
bitsum += num.count_ones();
}
bitsum
}
fn count_hash_seed(s: &str) -> u32 {
let mut bitsum = 0;
for hash in hashes(&s) {
bitsum += bitcount_hash(&hash);
}
bitsum
}
fn part_one() {
let input = "jxqlasbh";
println!("{}: {}", input, count_hash_seed(&input));
}
// --- Part Two ---
// Now, all the defragmenter needs to know is the number of regions. A region is a group of used squares that are all adjacent, not including diagonals. Every used square is in exactly one region: lone used squares form their own isolated regions, while several adjacent squares all count as a single region.
// In the example above, the following nine regions are visible, each marked with a distinct digit:
// 11.2.3..-->
// .1.2.3.4
// ....5.6.
// 7.8.55.9
// .88.5...
// 88..5..8
// .8...8..
// 88.8.88.-->
// | |
// V V
// Of particular interest is the region marked 8; while it does not appear contiguous in this small view, all of the squares marked 8 are connected when considering the whole 128x128 grid. In total, in this example, 1242 regions are present.
// How many regions are present given your key string?
fn make_grid(hash_seed: &str) -> Vec<Vec<bool>> {
let mut grid = Vec::with_capacity(128);
for hash in hashes(hash_seed) {
grid.push(hex_to_bits(&hash));
}
grid
}
/// make a single scan through the grid
// At each position, if the cell is filled, look in each cardinal direction for adjacent clusters
// If at least one is found, merge this element and all clusters that it is touching into the
// cluster with the lowest id that was found.
// If none are found, then start a new cluster on this cell.
type ClusterId = i32;
#[derive(Debug)]
struct Loc(usize, usize);
type CGrid = Vec<Vec<CellState>>;
type CMap = HashMap<ClusterId, Vec<Loc>>;
#[derive(PartialEq, Eq, Debug, Clone)]
enum CellState {
Unclaimed,
Empty,
Id(ClusterId)
}
struct Clusters {
grid: CGrid,
index: CMap,
next_id: ClusterId
}
impl Clusters {
fn new(size: u32) -> Self {
let mut grid : Vec<Vec<CellState>> = Vec::new();
for _ in 0..size {
let mut row = vec![];
for _ in 0..size {
row.push(CellState::Unclaimed);
}
grid.push(row);
}
Clusters { grid, index: HashMap::new(), next_id: 0 }
}
fn print_small(&self, window_size: usize) {
for row in self.grid.iter().take(window_size) {
println!("\n{}", row.iter().take(window_size).map(|c| match c {
&CellState::Id(id) => format!("{:4}", id),
&CellState::Empty => " .".to_string(),
&CellState::Unclaimed => " ?".to_string()
})
.collect::<Vec<String>>()
.join(" "));
}
}
fn add_grid(&mut self, &Loc(i, j): &Loc, id: ClusterId) {
self.grid[i][j] = CellState::Id(id);
| }
fn new_cluster(&mut self, loc: Loc) {
let id = self.next_id;
self.next_id += 1;
self.add_to_cluster(loc, id);
}
fn add_to_cluster(&mut self, loc: Loc, id: ClusterId) {
self.add_grid(&loc, id);
match self.index.entry(id) {
Occupied(mut e) => { e.get_mut().push(loc); }
Vacant(e) => { e.insert(vec![loc]); }
}
}
fn set_empty(&mut self, Loc(i, j): Loc) {
self.grid[i][j] = CellState::Empty;
}
fn state(&self, &Loc(i, j): &Loc) -> CellState {
self.grid[i][j].clone()
}
fn merge_clusters(&mut self, dest: ClusterId, other: &ClusterId) {
if dest == *other {
return;
}
if let Some(mut locs) = self.index.remove(&other) {
for loc in locs.iter() {
self.add_grid(&loc, dest);
}
self.index.entry(dest)
.and_modify(|f| f.append(&mut locs))
.or_insert_with(|| locs );
}
}
}
fn print_small_grid(size: u32, occupied: &Vec<Vec<bool>>) {
for row in occupied.iter().take(size as usize) {
println!("\n{}", row.iter().take(size as usize).map(|c| match c {
&true => "#",
&false => ".",
})
.collect::<Vec<&str>>()
.join(" "));
}
}
/*
This algorithm makes one pass through the grid, left to right, top to bottom.
At each cell, if the cell is occupied, it checks all neighboring cells for any that
belong to a cluster. Then current cell and all of its cluster neighbors are merged into
the lowest-id cluster that it finds.
If the cell is occupied but has no neighbors that belong to cells, a new cluster is started.
*/
fn count_clusters(occupied: &Vec<Vec<bool>>) -> u32 {
let size = 128;
let mut clusters = Clusters::new(size);
let len = clusters.grid.len();
// print_small_grid(10, &occupied);
for i in 0..len {
let jlen = clusters.grid[i].len();
for j in 0..jlen {
let val = clusters.state(&Loc(i, j));
if occupied[i][j] {
let mut adj_clusters = vec![];
for o in [-1, 1].iter() {
let it = (i as i64) + *o;
let jt = (j as i64) + *o;
if it >= 0 && it < len as i64 {
let loc = Loc(it as usize, j);
if let CellState::Id(id) = clusters.state(&loc) {
adj_clusters.push(id);
}
}
if jt >= 0 && jt < jlen as i64 {
let loc = Loc(i, jt as usize);
if let CellState::Id(id) = clusters.state(&loc) {
adj_clusters.push(id);
}
}
}
if adj_clusters.len() > 0 {
let min = adj_clusters.iter().clone().min().unwrap();
for id in adj_clusters.iter() {
clusters.merge_clusters(*min, &id);
}
clusters.add_to_cluster(Loc(i, j), *min);
} else {
clusters.new_cluster(Loc(i, j));
}
}
else {
clusters.set_empty(Loc(i, j))
}
}
}
// clusters.print_small(10);
clusters.index.keys().len() as u32
}
fn part_two() {
let grid = make_grid("jxqlasbh");
let count = count_clusters(&grid);
println!("14-2: {} clusters in {}", count, "jxqlasbh");
}
fn main() {
part_one();
part_two();
}
#[cfg(test)]
mod tests {
use count_hash_seed;
use hex_to_bits;
use count_clusters;
use make_grid;
#[test]
fn test_count_clusters() {
assert_eq!(count_clusters(&make_grid("flqrgnkx")), 1242);
}
#[test]
fn test_count_hash_seed() {
assert_eq!(count_hash_seed("flqrgnkx"), 8108);
}
#[test]
fn test_hex_to_bits() {
for (expected_value, letter) in ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f"].iter().enumerate() {
let actual = hex_to_bits(letter);
let actual_binary_string = actual
.iter()
.map(|b| if *b { '1' } else { '0' }).collect::<String>();
let actual_value = u8::from_str_radix(&actual_binary_string, 2).unwrap();
assert_eq!(actual_value, expected_value as u8);
}
}
} | random_line_split | |
tsne2.js | // create main global object
const tsnejs = window.tsnejs || { REVISION: 'ALPHA' };
function assert(condition, message) {
if (!condition) { throw message || 'Assertion failed'; }
}
// utilitity that creates contiguous vector of zeros of size n
function zeros(n) {
if (typeof (n) === 'undefined' || isNaN(n)) { return []; }
if (typeof ArrayBuffer === 'undefined') {
// lacking browser support
const arr = new Array(n);
for (let i = 0; i < n; i++) { arr[i] = 0; }
return arr;
}
return new Float64Array(n); // typed arrays are faster
}
// return 0 mean unit standard deviation random number
let returnCache = false;
let gaussCache = 0.0;
function gaussRandom() {
if (returnCache) {
returnCache = false;
return gaussCache;
}
const u = (2 * Math.random()) - 1;
const v = (2 * Math.random()) - 1;
const r = (u * u) + (v * v);
if (r === 0 || r > 1) return gaussRandom();
const c = Math.sqrt((-2 * Math.log(r)) / r);
gaussCache = v * c; // cache this for next function call for efficiency
returnCache = true;
return u * c;
}
// return random normal number
function randn(mu, std) {
return mu + (gaussRandom() * std);
}
// utility that returns 2d array filled with random numbers
// or with value s, if provided
function randn2d(n, d, s) {
const uses = typeof s !== 'undefined';
const x = [];
for (let i = 0; i < n; i++) {
const xhere = [];
for (let j = 0; j < d; j++) {
if (uses) {
xhere.push(s);
} else {
xhere.push(randn(0.0, 1e-4));
}
}
x.push(xhere);
}
return x;
}
// compute L2 distance between two vectors
function computeVectorDistance(x1, x2) {
const D = x1.length;
let d = 0;
for (let i = 0; i < D; i++) {
const x1i = x1[i];
const x2i = x2[i];
d += (x1i - x2i) * (x1i - x2i);
}
return d;
}
// compute pairwise distance in all vectors in X
function pairwiseDistances(X) {
const N = X.length;
const dist = zeros(N * N); // allocate contiguous array
for (let i = 0; i < N; i++) {
for (let j = i + 1; j < N; j++) {
const d = computeVectorDistance(X[i], X[j]);
dist[(i * N) + j] = d;
dist[(j * N) + i] = d;
}
}
return dist;
}
// helper function
function sign(x) {
if (x > 0) {
return 1;
}
if (x < 0) {
return -1;
}
return 0;
}
/**
Data: набор данных X = {x1, x2, …, xn},
параметр функции потерь: перплексия Perp,
Параметры оптимизации: количество итераций T, скорость обучения η, момент α(t).
Result: представление данных Y(T) = {y1, y2, …, yn} (в 2D или 3D).
begin
вычислить попарное сходство pj|i c перплексией Perp (используя формулу 1)
установить pij = (pj|i + pi|j)/2n
инициализировать Y(0) = {y1, y2, …, yn} точками нормального распределения (mean=0, sd=1e-4)
for t = 1 to T do
вычислить сходство точек в пространстве отображения qij (по формуле 4)
вычислить градиент δCost/δy (по формуле 5)
установить Y(t) = Y(t-1) + ηδCost/δy + α(t)(Y(t-1) - Y(t-2))
end
end
**/
function init() {
// compute (p_{i|j} + p_{j|i})/(2n)
function d2p(pairwiseDistances_, perplexity, precision) {
const rootOfDistancesArrayLength = Math.sqrt(pairwiseDistances_.length); // this better be an integer
const distancesArrayLength = Math.floor(rootOfDistancesArrayLength);
assert(distancesArrayLength === rootOfDistancesArrayLength, 'D should have square number of elements.');
const entropyTarget = Math.log(perplexity); // target entropy of distribution
const probabilityMatrix = zeros(distancesArrayLength * distancesArrayLength); // temporary probability matrix
const prow = zeros(distancesArrayLength); // a temporary storage compartment
for (let i = 0; i < distancesArrayLength; i++) {
let betamin = -Infinity;
let betamax = Infinity;
let beta = 1; // initial value of precision
let done = false;
const maxtries = 50;
// perform binary search to find a suitable precision beta
// so that the entropy of the distribution is appropriate
let iteration = 0;
while (!done) {
// debugger;
// compute entropy and kernel row with beta precision
let psum = 0.0;
for (let j = 0; j < distancesArrayLength; j++) {
let probabilityJ = Math.exp(-pairwiseDistances_[i * distancesArrayLength + j] * beta);
if (i === j) { probabilityJ = 0; } // we dont care about diagonals
prow[j] = probabilityJ;
psum += probabilityJ;
}
// normalize p and compute entropy
let entropy = 0.0;
for (let j = 0; j < distancesArrayLength; j++) {
let probabilityJ;
if (psum === 0) {
probabilityJ = 0;
} else {
probabilityJ = prow[j] / psum;
}
prow[j] = probabilityJ;
if (probabilityJ > 1e-7) entropy -= probabilityJ * Math.log(probabilityJ);
}
// adjust beta based on result
if (entropy > entropyTarget) {
// entropy was too high (distribution too diffuse)
// so we need to increase the precision for more peaky distribution
betamin = beta; // move up the bounds
if (betamax === Infinity) {
beta *= 2;
} else {
beta = (beta + betamax) / 2;
}
} else {
// converse case. make distrubtion less peaky
betamax = beta;
if (betamin === -Infinity) {
beta /= 2;
} else {
beta = (beta + betamin) / 2;
}
}
// stopping conditions: too many tries or got a good precision
iteration++;
if (Math.abs(entropy - entropyTarget) < precision) {
done = true;
}
if (iteration >= maxtries) {
done = true;
}
}
// console.log('data point ' + i + ' gets precision ' + beta + ' after ' + num + ' binary search steps.');
// copy over the final prow to P at row i
for (let j = 0; j < distancesArrayLength; j++) {
probabilityMatrix[i * distancesArrayLength + j] = prow[j];
}
} // end loop over examples i
// symmetrize P and normalize it to sum to 1 over all ij
const Pout = zeros(distancesArrayLength * distancesArrayLength);
const N2 = distancesArrayLength * 2;
for (let i = 0; i < distancesArrayLength; i++) {
for (let j = 0; j < distancesArrayLength; j++) {
Pout[i * distancesArrayLength + j] = Math.max((probabilityMatrix[i * distancesArrayLength + j] + probabilityMatrix[j * distancesArrayLength + i]) / N2, 1e-100);
}
}
return Pout;
}
function tSNE(opt = {}) {
tSNE.perplexity = opt.perplexity || 30; // effective number of nearest neighbors
tSNE.dim = opt.dim || 2; // by default 2-D tSNE
tSNE.epsilon = opt.epsilon || 10; // learning rate
tSNE.iter = 0;
}
tSNE.prototype = {
// this function takes a set of high-dimensional points
// and creates matrix P from them using gaussian kernel
initDataRaw(X) {
const N = X.length;
const D = X[0].length;
assert(N > 0, ' X is empty? You must have some data!');
assert(D > 0, ' X[0] is empty? Where is the data?');
const pairwiseDistancesOfInput = pairwiseDistances(X); // convert X to distances using gaussian kernel
this.P = d2p(pairwiseDistancesOfInput, this.perplexity, 1e-4); // attach to object
this.N = N; // back up the size of the dataset
this.initSolution(); // refresh this
},
// this function takes a given distance matrix and creates
// matrix P from them.
// D is assumed to be provided as a list of lists, and should be symmetric
initDataDist(distancesMatrix) {
const N = distancesMatrix.length;
assert(N > 0, ' X is empty? You must have some data!');
// convert D to a (fast) typed array version
const convertedDistances = zeros(N * N); // allocate contiguous array
for (let i = 0; i < N; i++) {
for (let j = i + 1; j < N; j++) {
const d = distancesMatrix[i][j];
convertedDistances[i * N + j] = d;
convertedDistances[j * N + i] = d;
}
}
this.P = d2p(convertedDistances, this.perplexity, 1e-4);
this.N = N;
this.initSolution(); // refresh this
},
// (re)initializes the solution to random
initSolution() {
// generate random solution to t-SNE
this.solution = randn2d(this.N, this.dim); // the solution
this.gains = randn2d(this.N, this.dim, 1.0); // step gains to accelerate progress in unchanging directions
this.ystep = randn2d(this.N, this.dim, 0.0); // momentum accumulator
this.iter = 0;
},
// return pointer to curren | / perform gradient step
const ymean = zeros(this.dim);
for (let i = 0; i < N; i++) {
for (let d = 0; d < this.dim; d++) {
const gid = grad[i][d];
const sid = this.ystep[i][d];
const gainid = this.gains[i][d];
// compute gain update
let newgain = sign(gid) === sign(sid) ? gainid * 0.8 : gainid + 0.2;
if (newgain < 0.01) newgain = 0.01; // clamp
this.gains[i][d] = newgain; // store for next turn
// compute momentum step direction
const momval = this.iter < 250 ? 0.5 : 0.8;
const newsid = momval * sid - this.epsilon * newgain * grad[i][d];
this.ystep[i][d] = newsid; // remember the step we took
// step!
this.solution[i][d] += newsid;
ymean[d] += this.solution[i][d]; // accumulate mean so that we can center later
}
}
// reproject Y to be zero mean
for (let i = 0; i < N; i++) {
for (let d = 0; d < this.dim; d++) {
this.solution[i][d] -= ymean[d] / N;
}
}
if (this.iter % 100 === 0) console.log(`iter ${this.iter}, cost: ${cost}`);
return cost; // return current cost
},
// return cost and gradient, given an arrangement
costAndGradient(solution) {
const N = this.N;
const dimentions = this.dim; // dim of output space
const P = this.P;
const pmul = this.iter < 100 ? 4 : 1; // trick that helps with local optima
// compute current Q distribution, unnormalized first
const Qu = zeros(N * N);
let qsum = 0.0;
for (let i = 0; i < N; i++) {
for (let j = i + 1; j < N; j++) {
let dsum = 0.0;
for (let d = 0; d < dimentions; d++) {
const dhere = solution[i][d] - solution[j][d];
dsum += dhere * dhere;
}
const qu = 1.0 / (1.0 + dsum); // Student t-distribution
Qu[i * N + j] = qu;
Qu[j * N + i] = qu;
qsum += 2 * qu;
}
}
// normalize Q distribution to sum to 1
const NN = N * N;
const Q = zeros(NN);
for (let q = 0; q < NN; q++) { Q[q] = Math.max(Qu[q] / qsum, 1e-100); }
let cost = 0.0;
const grad = [];
for (let i = 0; i < N; i++) {
const gsum = new Array(dimentions); // init grad for point i
for (let d = 0; d < dimentions; d++) { gsum[d] = 0.0; }
for (let j = 0; j < N; j++) {
cost += -P[i * N + j] * Math.log(Q[i * N + j]); // accumulate cost (the non-constant portion at least...)
const premult = 4 * (pmul * P[i * N + j] - Q[i * N + j]) * Qu[i * N + j];
for (let d = 0; d < dimentions; d++) {
gsum[d] += premult * (solution[i][d] - solution[j][d]);
}
}
grad.push(gsum);
}
return { cost, grad };
},
};
tsnejs.tSNE = tSNE; // export tSNE class
}
init();
| t solution
getSolution() {
return this.solution;
},
// perform a single step of optimization to improve the embedding
step() {
this.iter += 1;
const N = this.N;
const cg = this.costAndGradient(this.solution); // evaluate gradient
const cost = cg.cost;
const grad = cg.grad;
/ | identifier_body |
tsne2.js | // create main global object
const tsnejs = window.tsnejs || { REVISION: 'ALPHA' };
function assert(condition, message) {
if (!condition) { throw message || 'Assertion failed'; }
}
// utilitity that creates contiguous vector of zeros of size n
function zeros(n) {
if (typeof (n) === 'undefined' || isNaN(n)) { return []; }
if (typeof ArrayBuffer === 'undefined') {
// lacking browser support
const arr = new Array(n);
for (let i = 0; i < n; i++) { arr[i] = 0; }
return arr;
}
return new Float64Array(n); // typed arrays are faster
}
// return 0 mean unit standard deviation random number
let returnCache = false;
let gaussCache = 0.0;
function gaussRandom() {
if (returnCache) {
returnCache = false;
return gaussCache;
}
const u = (2 * Math.random()) - 1;
const v = (2 * Math.random()) - 1;
const r = (u * u) + (v * v);
if (r === 0 || r > 1) return gaussRandom();
const c = Math.sqrt((-2 * Math.log(r)) / r);
gaussCache = v * c; // cache this for next function call for efficiency
returnCache = true;
return u * c;
}
// return random normal number
function randn(mu, std) {
return mu + (gaussRandom() * std);
}
// utility that returns 2d array filled with random numbers
// or with value s, if provided
function randn2d(n, d, s) {
const uses = typeof s !== 'undefined';
const x = [];
for (let i = 0; i < n; i++) {
const xhere = [];
for (let j = 0; j < d; j++) {
if (uses) {
xhere.push(s);
} else {
xhere.push(randn(0.0, 1e-4));
}
}
x.push(xhere);
}
return x;
}
// compute L2 distance between two vectors
function computeVectorDistance(x1, x2) {
const D = x1.length;
let d = 0;
for (let i = 0; i < D; i++) {
const x1i = x1[i];
const x2i = x2[i];
d += (x1i - x2i) * (x1i - x2i);
}
return d;
}
// compute pairwise distance in all vectors in X
function | (X) {
const N = X.length;
const dist = zeros(N * N); // allocate contiguous array
for (let i = 0; i < N; i++) {
for (let j = i + 1; j < N; j++) {
const d = computeVectorDistance(X[i], X[j]);
dist[(i * N) + j] = d;
dist[(j * N) + i] = d;
}
}
return dist;
}
// helper function
function sign(x) {
if (x > 0) {
return 1;
}
if (x < 0) {
return -1;
}
return 0;
}
/**
Data: набор данных X = {x1, x2, …, xn},
параметр функции потерь: перплексия Perp,
Параметры оптимизации: количество итераций T, скорость обучения η, момент α(t).
Result: представление данных Y(T) = {y1, y2, …, yn} (в 2D или 3D).
begin
вычислить попарное сходство pj|i c перплексией Perp (используя формулу 1)
установить pij = (pj|i + pi|j)/2n
инициализировать Y(0) = {y1, y2, …, yn} точками нормального распределения (mean=0, sd=1e-4)
for t = 1 to T do
вычислить сходство точек в пространстве отображения qij (по формуле 4)
вычислить градиент δCost/δy (по формуле 5)
установить Y(t) = Y(t-1) + ηδCost/δy + α(t)(Y(t-1) - Y(t-2))
end
end
**/
function init() {
// compute (p_{i|j} + p_{j|i})/(2n)
function d2p(pairwiseDistances_, perplexity, precision) {
const rootOfDistancesArrayLength = Math.sqrt(pairwiseDistances_.length); // this better be an integer
const distancesArrayLength = Math.floor(rootOfDistancesArrayLength);
assert(distancesArrayLength === rootOfDistancesArrayLength, 'D should have square number of elements.');
const entropyTarget = Math.log(perplexity); // target entropy of distribution
const probabilityMatrix = zeros(distancesArrayLength * distancesArrayLength); // temporary probability matrix
const prow = zeros(distancesArrayLength); // a temporary storage compartment
for (let i = 0; i < distancesArrayLength; i++) {
let betamin = -Infinity;
let betamax = Infinity;
let beta = 1; // initial value of precision
let done = false;
const maxtries = 50;
// perform binary search to find a suitable precision beta
// so that the entropy of the distribution is appropriate
let iteration = 0;
while (!done) {
// debugger;
// compute entropy and kernel row with beta precision
let psum = 0.0;
for (let j = 0; j < distancesArrayLength; j++) {
let probabilityJ = Math.exp(-pairwiseDistances_[i * distancesArrayLength + j] * beta);
if (i === j) { probabilityJ = 0; } // we dont care about diagonals
prow[j] = probabilityJ;
psum += probabilityJ;
}
// normalize p and compute entropy
let entropy = 0.0;
for (let j = 0; j < distancesArrayLength; j++) {
let probabilityJ;
if (psum === 0) {
probabilityJ = 0;
} else {
probabilityJ = prow[j] / psum;
}
prow[j] = probabilityJ;
if (probabilityJ > 1e-7) entropy -= probabilityJ * Math.log(probabilityJ);
}
// adjust beta based on result
if (entropy > entropyTarget) {
// entropy was too high (distribution too diffuse)
// so we need to increase the precision for more peaky distribution
betamin = beta; // move up the bounds
if (betamax === Infinity) {
beta *= 2;
} else {
beta = (beta + betamax) / 2;
}
} else {
// converse case. make distrubtion less peaky
betamax = beta;
if (betamin === -Infinity) {
beta /= 2;
} else {
beta = (beta + betamin) / 2;
}
}
// stopping conditions: too many tries or got a good precision
iteration++;
if (Math.abs(entropy - entropyTarget) < precision) {
done = true;
}
if (iteration >= maxtries) {
done = true;
}
}
// console.log('data point ' + i + ' gets precision ' + beta + ' after ' + num + ' binary search steps.');
// copy over the final prow to P at row i
for (let j = 0; j < distancesArrayLength; j++) {
probabilityMatrix[i * distancesArrayLength + j] = prow[j];
}
} // end loop over examples i
// symmetrize P and normalize it to sum to 1 over all ij
const Pout = zeros(distancesArrayLength * distancesArrayLength);
const N2 = distancesArrayLength * 2;
for (let i = 0; i < distancesArrayLength; i++) {
for (let j = 0; j < distancesArrayLength; j++) {
Pout[i * distancesArrayLength + j] = Math.max((probabilityMatrix[i * distancesArrayLength + j] + probabilityMatrix[j * distancesArrayLength + i]) / N2, 1e-100);
}
}
return Pout;
}
function tSNE(opt = {}) {
tSNE.perplexity = opt.perplexity || 30; // effective number of nearest neighbors
tSNE.dim = opt.dim || 2; // by default 2-D tSNE
tSNE.epsilon = opt.epsilon || 10; // learning rate
tSNE.iter = 0;
}
tSNE.prototype = {
// this function takes a set of high-dimensional points
// and creates matrix P from them using gaussian kernel
initDataRaw(X) {
const N = X.length;
const D = X[0].length;
assert(N > 0, ' X is empty? You must have some data!');
assert(D > 0, ' X[0] is empty? Where is the data?');
const pairwiseDistancesOfInput = pairwiseDistances(X); // convert X to distances using gaussian kernel
this.P = d2p(pairwiseDistancesOfInput, this.perplexity, 1e-4); // attach to object
this.N = N; // back up the size of the dataset
this.initSolution(); // refresh this
},
// this function takes a given distance matrix and creates
// matrix P from them.
// D is assumed to be provided as a list of lists, and should be symmetric
initDataDist(distancesMatrix) {
const N = distancesMatrix.length;
assert(N > 0, ' X is empty? You must have some data!');
// convert D to a (fast) typed array version
const convertedDistances = zeros(N * N); // allocate contiguous array
for (let i = 0; i < N; i++) {
for (let j = i + 1; j < N; j++) {
const d = distancesMatrix[i][j];
convertedDistances[i * N + j] = d;
convertedDistances[j * N + i] = d;
}
}
this.P = d2p(convertedDistances, this.perplexity, 1e-4);
this.N = N;
this.initSolution(); // refresh this
},
// (re)initializes the solution to random
initSolution() {
// generate random solution to t-SNE
this.solution = randn2d(this.N, this.dim); // the solution
this.gains = randn2d(this.N, this.dim, 1.0); // step gains to accelerate progress in unchanging directions
this.ystep = randn2d(this.N, this.dim, 0.0); // momentum accumulator
this.iter = 0;
},
// return pointer to current solution
getSolution() {
return this.solution;
},
// perform a single step of optimization to improve the embedding
step() {
this.iter += 1;
const N = this.N;
const cg = this.costAndGradient(this.solution); // evaluate gradient
const cost = cg.cost;
const grad = cg.grad;
// perform gradient step
const ymean = zeros(this.dim);
for (let i = 0; i < N; i++) {
for (let d = 0; d < this.dim; d++) {
const gid = grad[i][d];
const sid = this.ystep[i][d];
const gainid = this.gains[i][d];
// compute gain update
let newgain = sign(gid) === sign(sid) ? gainid * 0.8 : gainid + 0.2;
if (newgain < 0.01) newgain = 0.01; // clamp
this.gains[i][d] = newgain; // store for next turn
// compute momentum step direction
const momval = this.iter < 250 ? 0.5 : 0.8;
const newsid = momval * sid - this.epsilon * newgain * grad[i][d];
this.ystep[i][d] = newsid; // remember the step we took
// step!
this.solution[i][d] += newsid;
ymean[d] += this.solution[i][d]; // accumulate mean so that we can center later
}
}
// reproject Y to be zero mean
for (let i = 0; i < N; i++) {
for (let d = 0; d < this.dim; d++) {
this.solution[i][d] -= ymean[d] / N;
}
}
if (this.iter % 100 === 0) console.log(`iter ${this.iter}, cost: ${cost}`);
return cost; // return current cost
},
// return cost and gradient, given an arrangement
costAndGradient(solution) {
const N = this.N;
const dimentions = this.dim; // dim of output space
const P = this.P;
const pmul = this.iter < 100 ? 4 : 1; // trick that helps with local optima
// compute current Q distribution, unnormalized first
const Qu = zeros(N * N);
let qsum = 0.0;
for (let i = 0; i < N; i++) {
for (let j = i + 1; j < N; j++) {
let dsum = 0.0;
for (let d = 0; d < dimentions; d++) {
const dhere = solution[i][d] - solution[j][d];
dsum += dhere * dhere;
}
const qu = 1.0 / (1.0 + dsum); // Student t-distribution
Qu[i * N + j] = qu;
Qu[j * N + i] = qu;
qsum += 2 * qu;
}
}
// normalize Q distribution to sum to 1
const NN = N * N;
const Q = zeros(NN);
for (let q = 0; q < NN; q++) { Q[q] = Math.max(Qu[q] / qsum, 1e-100); }
let cost = 0.0;
const grad = [];
for (let i = 0; i < N; i++) {
const gsum = new Array(dimentions); // init grad for point i
for (let d = 0; d < dimentions; d++) { gsum[d] = 0.0; }
for (let j = 0; j < N; j++) {
cost += -P[i * N + j] * Math.log(Q[i * N + j]); // accumulate cost (the non-constant portion at least...)
const premult = 4 * (pmul * P[i * N + j] - Q[i * N + j]) * Qu[i * N + j];
for (let d = 0; d < dimentions; d++) {
gsum[d] += premult * (solution[i][d] - solution[j][d]);
}
}
grad.push(gsum);
}
return { cost, grad };
},
};
tsnejs.tSNE = tSNE; // export tSNE class
}
init();
| pairwiseDistances | identifier_name |
tsne2.js | // create main global object |
// utilitity that creates contiguous vector of zeros of size n
function zeros(n) {
if (typeof (n) === 'undefined' || isNaN(n)) { return []; }
if (typeof ArrayBuffer === 'undefined') {
// lacking browser support
const arr = new Array(n);
for (let i = 0; i < n; i++) { arr[i] = 0; }
return arr;
}
return new Float64Array(n); // typed arrays are faster
}
// return 0 mean unit standard deviation random number
let returnCache = false;
let gaussCache = 0.0;
function gaussRandom() {
if (returnCache) {
returnCache = false;
return gaussCache;
}
const u = (2 * Math.random()) - 1;
const v = (2 * Math.random()) - 1;
const r = (u * u) + (v * v);
if (r === 0 || r > 1) return gaussRandom();
const c = Math.sqrt((-2 * Math.log(r)) / r);
gaussCache = v * c; // cache this for next function call for efficiency
returnCache = true;
return u * c;
}
// return random normal number
function randn(mu, std) {
return mu + (gaussRandom() * std);
}
// utility that returns 2d array filled with random numbers
// or with value s, if provided
function randn2d(n, d, s) {
const uses = typeof s !== 'undefined';
const x = [];
for (let i = 0; i < n; i++) {
const xhere = [];
for (let j = 0; j < d; j++) {
if (uses) {
xhere.push(s);
} else {
xhere.push(randn(0.0, 1e-4));
}
}
x.push(xhere);
}
return x;
}
// compute L2 distance between two vectors
function computeVectorDistance(x1, x2) {
const D = x1.length;
let d = 0;
for (let i = 0; i < D; i++) {
const x1i = x1[i];
const x2i = x2[i];
d += (x1i - x2i) * (x1i - x2i);
}
return d;
}
// compute pairwise distance in all vectors in X
function pairwiseDistances(X) {
const N = X.length;
const dist = zeros(N * N); // allocate contiguous array
for (let i = 0; i < N; i++) {
for (let j = i + 1; j < N; j++) {
const d = computeVectorDistance(X[i], X[j]);
dist[(i * N) + j] = d;
dist[(j * N) + i] = d;
}
}
return dist;
}
// helper function
function sign(x) {
if (x > 0) {
return 1;
}
if (x < 0) {
return -1;
}
return 0;
}
/**
Data: набор данных X = {x1, x2, …, xn},
параметр функции потерь: перплексия Perp,
Параметры оптимизации: количество итераций T, скорость обучения η, момент α(t).
Result: представление данных Y(T) = {y1, y2, …, yn} (в 2D или 3D).
begin
вычислить попарное сходство pj|i c перплексией Perp (используя формулу 1)
установить pij = (pj|i + pi|j)/2n
инициализировать Y(0) = {y1, y2, …, yn} точками нормального распределения (mean=0, sd=1e-4)
for t = 1 to T do
вычислить сходство точек в пространстве отображения qij (по формуле 4)
вычислить градиент δCost/δy (по формуле 5)
установить Y(t) = Y(t-1) + ηδCost/δy + α(t)(Y(t-1) - Y(t-2))
end
end
**/
function init() {
// compute (p_{i|j} + p_{j|i})/(2n)
function d2p(pairwiseDistances_, perplexity, precision) {
const rootOfDistancesArrayLength = Math.sqrt(pairwiseDistances_.length); // this better be an integer
const distancesArrayLength = Math.floor(rootOfDistancesArrayLength);
assert(distancesArrayLength === rootOfDistancesArrayLength, 'D should have square number of elements.');
const entropyTarget = Math.log(perplexity); // target entropy of distribution
const probabilityMatrix = zeros(distancesArrayLength * distancesArrayLength); // temporary probability matrix
const prow = zeros(distancesArrayLength); // a temporary storage compartment
for (let i = 0; i < distancesArrayLength; i++) {
let betamin = -Infinity;
let betamax = Infinity;
let beta = 1; // initial value of precision
let done = false;
const maxtries = 50;
// perform binary search to find a suitable precision beta
// so that the entropy of the distribution is appropriate
let iteration = 0;
while (!done) {
// debugger;
// compute entropy and kernel row with beta precision
let psum = 0.0;
for (let j = 0; j < distancesArrayLength; j++) {
let probabilityJ = Math.exp(-pairwiseDistances_[i * distancesArrayLength + j] * beta);
if (i === j) { probabilityJ = 0; } // we dont care about diagonals
prow[j] = probabilityJ;
psum += probabilityJ;
}
// normalize p and compute entropy
let entropy = 0.0;
for (let j = 0; j < distancesArrayLength; j++) {
let probabilityJ;
if (psum === 0) {
probabilityJ = 0;
} else {
probabilityJ = prow[j] / psum;
}
prow[j] = probabilityJ;
if (probabilityJ > 1e-7) entropy -= probabilityJ * Math.log(probabilityJ);
}
// adjust beta based on result
if (entropy > entropyTarget) {
// entropy was too high (distribution too diffuse)
// so we need to increase the precision for more peaky distribution
betamin = beta; // move up the bounds
if (betamax === Infinity) {
beta *= 2;
} else {
beta = (beta + betamax) / 2;
}
} else {
// converse case. make distrubtion less peaky
betamax = beta;
if (betamin === -Infinity) {
beta /= 2;
} else {
beta = (beta + betamin) / 2;
}
}
// stopping conditions: too many tries or got a good precision
iteration++;
if (Math.abs(entropy - entropyTarget) < precision) {
done = true;
}
if (iteration >= maxtries) {
done = true;
}
}
// console.log('data point ' + i + ' gets precision ' + beta + ' after ' + num + ' binary search steps.');
// copy over the final prow to P at row i
for (let j = 0; j < distancesArrayLength; j++) {
probabilityMatrix[i * distancesArrayLength + j] = prow[j];
}
} // end loop over examples i
// symmetrize P and normalize it to sum to 1 over all ij
const Pout = zeros(distancesArrayLength * distancesArrayLength);
const N2 = distancesArrayLength * 2;
for (let i = 0; i < distancesArrayLength; i++) {
for (let j = 0; j < distancesArrayLength; j++) {
Pout[i * distancesArrayLength + j] = Math.max((probabilityMatrix[i * distancesArrayLength + j] + probabilityMatrix[j * distancesArrayLength + i]) / N2, 1e-100);
}
}
return Pout;
}
function tSNE(opt = {}) {
tSNE.perplexity = opt.perplexity || 30; // effective number of nearest neighbors
tSNE.dim = opt.dim || 2; // by default 2-D tSNE
tSNE.epsilon = opt.epsilon || 10; // learning rate
tSNE.iter = 0;
}
tSNE.prototype = {
// this function takes a set of high-dimensional points
// and creates matrix P from them using gaussian kernel
initDataRaw(X) {
const N = X.length;
const D = X[0].length;
assert(N > 0, ' X is empty? You must have some data!');
assert(D > 0, ' X[0] is empty? Where is the data?');
const pairwiseDistancesOfInput = pairwiseDistances(X); // convert X to distances using gaussian kernel
this.P = d2p(pairwiseDistancesOfInput, this.perplexity, 1e-4); // attach to object
this.N = N; // back up the size of the dataset
this.initSolution(); // refresh this
},
// this function takes a given distance matrix and creates
// matrix P from them.
// D is assumed to be provided as a list of lists, and should be symmetric
initDataDist(distancesMatrix) {
const N = distancesMatrix.length;
assert(N > 0, ' X is empty? You must have some data!');
// convert D to a (fast) typed array version
const convertedDistances = zeros(N * N); // allocate contiguous array
for (let i = 0; i < N; i++) {
for (let j = i + 1; j < N; j++) {
const d = distancesMatrix[i][j];
convertedDistances[i * N + j] = d;
convertedDistances[j * N + i] = d;
}
}
this.P = d2p(convertedDistances, this.perplexity, 1e-4);
this.N = N;
this.initSolution(); // refresh this
},
// (re)initializes the solution to random
initSolution() {
// generate random solution to t-SNE
this.solution = randn2d(this.N, this.dim); // the solution
this.gains = randn2d(this.N, this.dim, 1.0); // step gains to accelerate progress in unchanging directions
this.ystep = randn2d(this.N, this.dim, 0.0); // momentum accumulator
this.iter = 0;
},
// return pointer to current solution
getSolution() {
return this.solution;
},
// perform a single step of optimization to improve the embedding
step() {
this.iter += 1;
const N = this.N;
const cg = this.costAndGradient(this.solution); // evaluate gradient
const cost = cg.cost;
const grad = cg.grad;
// perform gradient step
const ymean = zeros(this.dim);
for (let i = 0; i < N; i++) {
for (let d = 0; d < this.dim; d++) {
const gid = grad[i][d];
const sid = this.ystep[i][d];
const gainid = this.gains[i][d];
// compute gain update
let newgain = sign(gid) === sign(sid) ? gainid * 0.8 : gainid + 0.2;
if (newgain < 0.01) newgain = 0.01; // clamp
this.gains[i][d] = newgain; // store for next turn
// compute momentum step direction
const momval = this.iter < 250 ? 0.5 : 0.8;
const newsid = momval * sid - this.epsilon * newgain * grad[i][d];
this.ystep[i][d] = newsid; // remember the step we took
// step!
this.solution[i][d] += newsid;
ymean[d] += this.solution[i][d]; // accumulate mean so that we can center later
}
}
// reproject Y to be zero mean
for (let i = 0; i < N; i++) {
for (let d = 0; d < this.dim; d++) {
this.solution[i][d] -= ymean[d] / N;
}
}
if (this.iter % 100 === 0) console.log(`iter ${this.iter}, cost: ${cost}`);
return cost; // return current cost
},
// return cost and gradient, given an arrangement
costAndGradient(solution) {
const N = this.N;
const dimentions = this.dim; // dim of output space
const P = this.P;
const pmul = this.iter < 100 ? 4 : 1; // trick that helps with local optima
// compute current Q distribution, unnormalized first
const Qu = zeros(N * N);
let qsum = 0.0;
for (let i = 0; i < N; i++) {
for (let j = i + 1; j < N; j++) {
let dsum = 0.0;
for (let d = 0; d < dimentions; d++) {
const dhere = solution[i][d] - solution[j][d];
dsum += dhere * dhere;
}
const qu = 1.0 / (1.0 + dsum); // Student t-distribution
Qu[i * N + j] = qu;
Qu[j * N + i] = qu;
qsum += 2 * qu;
}
}
// normalize Q distribution to sum to 1
const NN = N * N;
const Q = zeros(NN);
for (let q = 0; q < NN; q++) { Q[q] = Math.max(Qu[q] / qsum, 1e-100); }
let cost = 0.0;
const grad = [];
for (let i = 0; i < N; i++) {
const gsum = new Array(dimentions); // init grad for point i
for (let d = 0; d < dimentions; d++) { gsum[d] = 0.0; }
for (let j = 0; j < N; j++) {
cost += -P[i * N + j] * Math.log(Q[i * N + j]); // accumulate cost (the non-constant portion at least...)
const premult = 4 * (pmul * P[i * N + j] - Q[i * N + j]) * Qu[i * N + j];
for (let d = 0; d < dimentions; d++) {
gsum[d] += premult * (solution[i][d] - solution[j][d]);
}
}
grad.push(gsum);
}
return { cost, grad };
},
};
tsnejs.tSNE = tSNE; // export tSNE class
}
init(); | const tsnejs = window.tsnejs || { REVISION: 'ALPHA' };
function assert(condition, message) {
if (!condition) { throw message || 'Assertion failed'; }
} | random_line_split |
tsne2.js | // create main global object
const tsnejs = window.tsnejs || { REVISION: 'ALPHA' };
function assert(condition, message) {
if (!condition) |
}
// utilitity that creates contiguous vector of zeros of size n
function zeros(n) {
if (typeof (n) === 'undefined' || isNaN(n)) { return []; }
if (typeof ArrayBuffer === 'undefined') {
// lacking browser support
const arr = new Array(n);
for (let i = 0; i < n; i++) { arr[i] = 0; }
return arr;
}
return new Float64Array(n); // typed arrays are faster
}
// return 0 mean unit standard deviation random number
let returnCache = false;
let gaussCache = 0.0;
function gaussRandom() {
if (returnCache) {
returnCache = false;
return gaussCache;
}
const u = (2 * Math.random()) - 1;
const v = (2 * Math.random()) - 1;
const r = (u * u) + (v * v);
if (r === 0 || r > 1) return gaussRandom();
const c = Math.sqrt((-2 * Math.log(r)) / r);
gaussCache = v * c; // cache this for next function call for efficiency
returnCache = true;
return u * c;
}
// return random normal number
function randn(mu, std) {
return mu + (gaussRandom() * std);
}
// utility that returns 2d array filled with random numbers
// or with value s, if provided
function randn2d(n, d, s) {
const uses = typeof s !== 'undefined';
const x = [];
for (let i = 0; i < n; i++) {
const xhere = [];
for (let j = 0; j < d; j++) {
if (uses) {
xhere.push(s);
} else {
xhere.push(randn(0.0, 1e-4));
}
}
x.push(xhere);
}
return x;
}
// compute L2 distance between two vectors
function computeVectorDistance(x1, x2) {
const D = x1.length;
let d = 0;
for (let i = 0; i < D; i++) {
const x1i = x1[i];
const x2i = x2[i];
d += (x1i - x2i) * (x1i - x2i);
}
return d;
}
// compute pairwise distance in all vectors in X
function pairwiseDistances(X) {
const N = X.length;
const dist = zeros(N * N); // allocate contiguous array
for (let i = 0; i < N; i++) {
for (let j = i + 1; j < N; j++) {
const d = computeVectorDistance(X[i], X[j]);
dist[(i * N) + j] = d;
dist[(j * N) + i] = d;
}
}
return dist;
}
// helper function
function sign(x) {
if (x > 0) {
return 1;
}
if (x < 0) {
return -1;
}
return 0;
}
/**
Data: набор данных X = {x1, x2, …, xn},
параметр функции потерь: перплексия Perp,
Параметры оптимизации: количество итераций T, скорость обучения η, момент α(t).
Result: представление данных Y(T) = {y1, y2, …, yn} (в 2D или 3D).
begin
вычислить попарное сходство pj|i c перплексией Perp (используя формулу 1)
установить pij = (pj|i + pi|j)/2n
инициализировать Y(0) = {y1, y2, …, yn} точками нормального распределения (mean=0, sd=1e-4)
for t = 1 to T do
вычислить сходство точек в пространстве отображения qij (по формуле 4)
вычислить градиент δCost/δy (по формуле 5)
установить Y(t) = Y(t-1) + ηδCost/δy + α(t)(Y(t-1) - Y(t-2))
end
end
**/
function init() {
// compute (p_{i|j} + p_{j|i})/(2n)
function d2p(pairwiseDistances_, perplexity, precision) {
const rootOfDistancesArrayLength = Math.sqrt(pairwiseDistances_.length); // this better be an integer
const distancesArrayLength = Math.floor(rootOfDistancesArrayLength);
assert(distancesArrayLength === rootOfDistancesArrayLength, 'D should have square number of elements.');
const entropyTarget = Math.log(perplexity); // target entropy of distribution
const probabilityMatrix = zeros(distancesArrayLength * distancesArrayLength); // temporary probability matrix
const prow = zeros(distancesArrayLength); // a temporary storage compartment
for (let i = 0; i < distancesArrayLength; i++) {
let betamin = -Infinity;
let betamax = Infinity;
let beta = 1; // initial value of precision
let done = false;
const maxtries = 50;
// perform binary search to find a suitable precision beta
// so that the entropy of the distribution is appropriate
let iteration = 0;
while (!done) {
// debugger;
// compute entropy and kernel row with beta precision
let psum = 0.0;
for (let j = 0; j < distancesArrayLength; j++) {
let probabilityJ = Math.exp(-pairwiseDistances_[i * distancesArrayLength + j] * beta);
if (i === j) { probabilityJ = 0; } // we dont care about diagonals
prow[j] = probabilityJ;
psum += probabilityJ;
}
// normalize p and compute entropy
let entropy = 0.0;
for (let j = 0; j < distancesArrayLength; j++) {
let probabilityJ;
if (psum === 0) {
probabilityJ = 0;
} else {
probabilityJ = prow[j] / psum;
}
prow[j] = probabilityJ;
if (probabilityJ > 1e-7) entropy -= probabilityJ * Math.log(probabilityJ);
}
// adjust beta based on result
if (entropy > entropyTarget) {
// entropy was too high (distribution too diffuse)
// so we need to increase the precision for more peaky distribution
betamin = beta; // move up the bounds
if (betamax === Infinity) {
beta *= 2;
} else {
beta = (beta + betamax) / 2;
}
} else {
// converse case. make distrubtion less peaky
betamax = beta;
if (betamin === -Infinity) {
beta /= 2;
} else {
beta = (beta + betamin) / 2;
}
}
// stopping conditions: too many tries or got a good precision
iteration++;
if (Math.abs(entropy - entropyTarget) < precision) {
done = true;
}
if (iteration >= maxtries) {
done = true;
}
}
// console.log('data point ' + i + ' gets precision ' + beta + ' after ' + num + ' binary search steps.');
// copy over the final prow to P at row i
for (let j = 0; j < distancesArrayLength; j++) {
probabilityMatrix[i * distancesArrayLength + j] = prow[j];
}
} // end loop over examples i
// symmetrize P and normalize it to sum to 1 over all ij
const Pout = zeros(distancesArrayLength * distancesArrayLength);
const N2 = distancesArrayLength * 2;
for (let i = 0; i < distancesArrayLength; i++) {
for (let j = 0; j < distancesArrayLength; j++) {
Pout[i * distancesArrayLength + j] = Math.max((probabilityMatrix[i * distancesArrayLength + j] + probabilityMatrix[j * distancesArrayLength + i]) / N2, 1e-100);
}
}
return Pout;
}
function tSNE(opt = {}) {
tSNE.perplexity = opt.perplexity || 30; // effective number of nearest neighbors
tSNE.dim = opt.dim || 2; // by default 2-D tSNE
tSNE.epsilon = opt.epsilon || 10; // learning rate
tSNE.iter = 0;
}
tSNE.prototype = {
// this function takes a set of high-dimensional points
// and creates matrix P from them using gaussian kernel
initDataRaw(X) {
const N = X.length;
const D = X[0].length;
assert(N > 0, ' X is empty? You must have some data!');
assert(D > 0, ' X[0] is empty? Where is the data?');
const pairwiseDistancesOfInput = pairwiseDistances(X); // convert X to distances using gaussian kernel
this.P = d2p(pairwiseDistancesOfInput, this.perplexity, 1e-4); // attach to object
this.N = N; // back up the size of the dataset
this.initSolution(); // refresh this
},
// this function takes a given distance matrix and creates
// matrix P from them.
// D is assumed to be provided as a list of lists, and should be symmetric
initDataDist(distancesMatrix) {
const N = distancesMatrix.length;
assert(N > 0, ' X is empty? You must have some data!');
// convert D to a (fast) typed array version
const convertedDistances = zeros(N * N); // allocate contiguous array
for (let i = 0; i < N; i++) {
for (let j = i + 1; j < N; j++) {
const d = distancesMatrix[i][j];
convertedDistances[i * N + j] = d;
convertedDistances[j * N + i] = d;
}
}
this.P = d2p(convertedDistances, this.perplexity, 1e-4);
this.N = N;
this.initSolution(); // refresh this
},
// (re)initializes the solution to random
initSolution() {
// generate random solution to t-SNE
this.solution = randn2d(this.N, this.dim); // the solution
this.gains = randn2d(this.N, this.dim, 1.0); // step gains to accelerate progress in unchanging directions
this.ystep = randn2d(this.N, this.dim, 0.0); // momentum accumulator
this.iter = 0;
},
// return pointer to current solution
getSolution() {
return this.solution;
},
// perform a single step of optimization to improve the embedding
step() {
this.iter += 1;
const N = this.N;
const cg = this.costAndGradient(this.solution); // evaluate gradient
const cost = cg.cost;
const grad = cg.grad;
// perform gradient step
const ymean = zeros(this.dim);
for (let i = 0; i < N; i++) {
for (let d = 0; d < this.dim; d++) {
const gid = grad[i][d];
const sid = this.ystep[i][d];
const gainid = this.gains[i][d];
// compute gain update
let newgain = sign(gid) === sign(sid) ? gainid * 0.8 : gainid + 0.2;
if (newgain < 0.01) newgain = 0.01; // clamp
this.gains[i][d] = newgain; // store for next turn
// compute momentum step direction
const momval = this.iter < 250 ? 0.5 : 0.8;
const newsid = momval * sid - this.epsilon * newgain * grad[i][d];
this.ystep[i][d] = newsid; // remember the step we took
// step!
this.solution[i][d] += newsid;
ymean[d] += this.solution[i][d]; // accumulate mean so that we can center later
}
}
// reproject Y to be zero mean
for (let i = 0; i < N; i++) {
for (let d = 0; d < this.dim; d++) {
this.solution[i][d] -= ymean[d] / N;
}
}
if (this.iter % 100 === 0) console.log(`iter ${this.iter}, cost: ${cost}`);
return cost; // return current cost
},
// return cost and gradient, given an arrangement
costAndGradient(solution) {
const N = this.N;
const dimentions = this.dim; // dim of output space
const P = this.P;
const pmul = this.iter < 100 ? 4 : 1; // trick that helps with local optima
// compute current Q distribution, unnormalized first
const Qu = zeros(N * N);
let qsum = 0.0;
for (let i = 0; i < N; i++) {
for (let j = i + 1; j < N; j++) {
let dsum = 0.0;
for (let d = 0; d < dimentions; d++) {
const dhere = solution[i][d] - solution[j][d];
dsum += dhere * dhere;
}
const qu = 1.0 / (1.0 + dsum); // Student t-distribution
Qu[i * N + j] = qu;
Qu[j * N + i] = qu;
qsum += 2 * qu;
}
}
// normalize Q distribution to sum to 1
const NN = N * N;
const Q = zeros(NN);
for (let q = 0; q < NN; q++) { Q[q] = Math.max(Qu[q] / qsum, 1e-100); }
let cost = 0.0;
const grad = [];
for (let i = 0; i < N; i++) {
const gsum = new Array(dimentions); // init grad for point i
for (let d = 0; d < dimentions; d++) { gsum[d] = 0.0; }
for (let j = 0; j < N; j++) {
cost += -P[i * N + j] * Math.log(Q[i * N + j]); // accumulate cost (the non-constant portion at least...)
const premult = 4 * (pmul * P[i * N + j] - Q[i * N + j]) * Qu[i * N + j];
for (let d = 0; d < dimentions; d++) {
gsum[d] += premult * (solution[i][d] - solution[j][d]);
}
}
grad.push(gsum);
}
return { cost, grad };
},
};
tsnejs.tSNE = tSNE; // export tSNE class
}
init();
| { throw message || 'Assertion failed'; } | conditional_block |
fiUnam.py | # -*- coding: utf-8 -*-
import mmap
import os # para conocer los metadatos de archivos a ingresar con la fx cpin()
import math
import os.path, time
class SuperBlock :
"""
El superbloque para este sistema de archivos ocupa el primer cluster
del mismo, es decir, ocupa 2048
"""
f = open('fiunamfs.img','r+b')
fs_map = mmap.mmap(f.fileno(),0,access=mmap.ACCESS_READ)
# Información de superbloque
name = fs_map[0:8].decode('utf-8') # FiUnamFS
version = fs_map[10:13].decode('utf-8') # 0.4
tagv = fs_map[20:35].decode('utf-8') # Mi Sistema
size_cluster = int(fs_map[40:45].decode('utf-8')) # 2048
numdir_cluster = int(fs_map[47:49].decode('utf-8')) # 04
total_cluster = int(fs_map[52:60].decode('utf-8')) # 00000720
size_dentry = 64 # size dir entry
f.close()
fs_map.close()
class DIRENTRY :
"""
De hecho, estrictamente esta clase no es un inode ya que estamos
guardando el nombre del archivo en él y eso no pasa en los verdaderos
inodes y obviamente tampoco estamos guardando
permisos ni propietarios porque NO los tenemos
"""
offset_fname = 15
offset_fsize = 8
offset_fcluster = 5
offset_fcreated = 14
offset_fmodif = 14
fname = "" # 0-15
fsize = 0 # 16-24
finit_cluster = 0 # 25-30
fcreated = "" # 31-45
fmodif = "" # 46-60
numdir = -1 # numero entre 0-63
# por las especificaciones
def __init__(self, dir_entry):
self.fname = dir_entry[0:15].decode('utf-8').lstrip()
self.fsize = int(dir_entry[16:24].decode('utf-8'))
self.finit_cluster = int(dir_entry[25:30].decode('utf-8'))
self.fcreated = dir_entry[31:45].decode('utf-8')
self.fmodif = dir_entry[46:60].decode('utf-8')
class FIFS:
f = open('fiunamfs.img','a+b')
fs_map = mmap.mmap(f.fileno(),0,access=mmap.ACCESS_WRITE)
sb = SuperBlock()
dentry_notused ='Xx.xXx.xXx.xXx.'
# Función interna
def inodes(self):
# usamos del 1-4 clusters, es decir 2048*4 = 4096
# las entradas miden 64 por lo tanto 4096/64 = 128, entonces el rango
# del for 0-128
inodes = []
for j in range(0,128):
# El directorio se encuentra en los cluster de 1-4 y cada cluster
# mide 2048 por lo tanto debemo ir en 2048, el cluser 0 es el
# superblock
prtb = self.sb.size_cluster + j*self.sb.size_dentry
i = DIRENTRY(self.fs_map[prtb:prtb + self.sb.size_dentry])
if self.dentry_notused != i.fname:
i.numdir = j
inodes.append(i)
return inodes
def search(self,fe):
for j in range(0,128):
prtb = self.sb.size_cluster + j*self.sb.size_dentry
i = DIRENTRY(self.fs_map[prtb:prtb + self.sb.size_dentry])
if fe == i.fname:
i.numdir = j
return i
return None
def registerFile(self,fe,cluster):
for j in range(0,128):
prtb = self.sb.size_cluster + j*self.sb.size_dentry
i = DIRENTRY(self.fs_map[prtb:prtb + self.sb.size_dentry])
if self.dentry_notused == i.fname:
# tener cuidado con longitud de nombres
spaces = i.offset_fname - len(fe)
self.fs_map[prtb:prtb + i.offset_fname] = bytes(fe.rjust(len(fe)+spaces)).encode('utf-8')
fe_size = str(os.stat(fe).st_size)
size_zeros = i.offset_fsize - len(fe_size)
new_ptrb = prtb + i.offset_fname + 1
self.fs_map[new_ptrb :new_ptrb + i.offset_fsize] = bytes(fe_size.zfill(len(fe_size)+size_zeros)).encode('utf-8')
fe_cluster = str(cluster)
cluster_zeros = i.offset_fcluster - len(fe_cluster)
new_ptrb += i.offset_fsize + 1
self.fs_map[new_ptrb:new_ptrb + i.offset_fcluster] = bytes(fe_cluster.zfill(len(fe_cluster)+cluster_zeros)).encode('utf-8')
fe_date_create= time.strftime('%Y%m%d%H%M%S', time.gmtime(os.path.getctime(fe)))
new_ptrb += i.offset_fcluster + 1
self.fs_map[new_ptrb:new_ptrb + i.offset_fcreated] = bytes(fe_date_create).encode('utf-8')
fe_date_modif=time.strftime('%Y%m%d%H%M%S', time.gmtime(os.path.getmtime(fe)))
new_ptrb += i.offset_fcreated+ 1
self.fs_map[new_ptrb:new_ptrb + i.offset_fmodif] = bytes(fe_date_modif).encode('utf-8')
break
def cpint(self,inode,clustdest):
ptrb=self.sb.size_cluster*inode.finit_cluster
buffer=self.fs_map[ptrb:ptrb+inode.fsize]
ptrbdest=self.sb.size_cluster*clustdest
self.fs_map[ptrbdest:ptrbdest+inode.fsize]=buffer
def close(self):
self.fs_map.close()
self.f.close()
def ls(self):
for i in self.inodes():
f=self.date_format(i.fmodif)
print("%s\t%d\t%d\t%s" %(i.fname,i.finit_cluster,i.fsize,f))
def rm(self,fe):
#Primero buscar si el archivo existe,
#si existe, perdemos la ref hacia él
i = self.search(fe)
if i is None :
print("rm: " + fe + " : No such file ")
else :
prtb | def date_format(self,date):
months={'01':'Jan','02':'Feb','03':'March','04':'Apr','05':'May',
'06':'Jun','07':'Jul','08':'Aug','09':'Sept','10':'Oct','11':'Nov','12':'Dec'}
a=date[0:4]
m=months.get(date[4:6])
d=date[6:8]
hh=date[8:10]
mm=date[10:12]
ss=date[12:14]
return m+'\t'+d+'\t'+a+'\t'+hh+':'+mm+':'+ss
def cpout(self,fe,dir):
#Primero buscar si el archivo existe,
#si existe, lo copiamos al directorio especificado
i = self.search(fe)
if i is None :
print("cpout: " + fe + " : No such file ")
else :
prtb = self.sb.size_cluster + self.sb.size_dentry*i.numdir
# VERIFICAR QUE EXISTA EL ARCHIVO
filecp = open(fe,"a+b")
cluster = self.sb.size_cluster*i.finit_cluster
# operacion : 2048*inicio_cluster_del_archivo_a_copiar
filecp.write(self.fs_map[cluster:cluster + i.fsize])
filecp.close()
def cpin(self,fe):
# Buscar si no hay un archivo con el nombre recibido
# Si no entonces
# buscar un lugar donde quepa el archivo
# sino hay lugar, desfragmentamos
# si despues de desfragmentar no hay lugar
# mandar error
# cargando todos los dentry que no tenga la cadena 'AQUI NO VA'
# mediante la funcion inodes()
#PRIMERO VALIDAR SI EL ARCHIVO EXISTA
if os.path.isfile(fe):
if len(fe)<15:
if self.search(fe)!=None:
print('Ya existe un archivo con el mismo nombre, renombrar')
else:
self.cp(fe)
else:
print("cpin: " + fe + ": file name too large")
else:
print("cpin: " + fe + ": file not found")
def defrag(self):
inodes=self.inodes()
if(len(inodes)!=0):
if inodes[0].finit_cluster != 5:
self.cpint(inodes[0],5)
self.over(inodes[0],5)
inodes[0].finit_cluster=5
for j in range(0,len(inodes)-1):
i_lastcluster = inodes[j].finit_cluster + math.ceil(inodes[j].fsize/self.sb.size_cluster)
self.cpint(inodes[j+1],i_lastcluster+1)
self.over(inodes[j+1],i_lastcluster+1)
inodes[j].finit_cluster=i_lastcluster+1
def over(self,inode,newcluster):
fe_cluster = str(newcluster)
cluster_zeros = inode.offset_fcluster- len(fe_cluster)
ptrb = self.sb.size_cluster + inode.numdir*self.sb.size_dentry+25
self.fs_map[ptrb:ptrb+inode.offset_fcluster]=bytes(fe_cluster.zfill(len(fe_cluster)+cluster_zeros),'utf-8')
def cp(self,fe):
inodes = self.inodes()
inodes.sort(key=lambda x: x.finit_cluster)
fe_size = os.stat(fe).st_size
fe_numclusters = math.ceil(fe_size/self.sb.size_cluster)
if len(inodes) == 0:
i_lastcluster = 4
cluster_space = self.sb.total_cluster - i_lastcluster
if fe_numclusters <= cluster_space :
f = open(fe,"rb")
fe_prtb = self.sb.size_cluster*(i_lastcluster + 1)
fe_prtt = fe_prtb + fe_size
self.fs_map[fe_prtb:fe_prtt] = f.read()
self.registerFile(fe,i_lastcluster+1)
f.close()
else:
print("cpin: " + fe + ": file too large")
elif len(inodes) == 1:
self.defrag()
i_lastcluster = inodes[0].finit_cluster + math.ceil(inodes[0].fsize/self.sb.size_cluster)
cluster_space = self.sb.total_cluster - i_lastcluster
if fe_numclusters <= cluster_space :
f = open(fe,"rb")
fe_prtb = self.sb.size_cluster*(i_lastcluster + 1)
fe_prtt = fe_prtb + fe_size
self.fs_map[fe_prtb:fe_prtt] = f.read()
self.registerFile(fe,i_lastcluster+1)
f.close()
else:
print("cpin: " + fe + ": file too large")
else:
sucess = False
for j in range(0,len(inodes)-1):
i_lastcluster = inodes[j].finit_cluster + math.ceil(inodes[j].fsize/self.sb.size_cluster)
# espacio en clusters entre archivos
cluster_space = inodes[j+1].finit_cluster - i_lastcluster
# Usando el algoritmo FIFO, guardamos nuestro archivo en el
# primer bloque en el que quepa
if fe_numclusters <= cluster_space :
f = open(fe,"rb")
# vamos a escribir en i_lastcluster + 1 nuestra info
fe_prtb = self.sb.size_cluster*(i_lastcluster + 1)
fe_prtt = fe_prtb + fe_size
self.fs_map[int(fe_prtb):int(fe_prtt)] = f.read()
# hacer registro de metadatos
self.registerFile(fe,i_lastcluster+1)
f.close()
sucess = True
break
if not sucess:
i_lastcluster = inodes[len(inodes)-1].finit_cluster + math.ceil(inodes[len(inodes)-1].fsize/self.sb.size_cluster)
cluster_space = self.sb.total_cluster - i_lastcluster
if fe_numclusters <= cluster_space :
f = open(fe,"rb")
# vamos a escribir en i_lastcluster + 1 nuestra info
fe_prtb = self.sb.size_cluster*(i_lastcluster + 1)
fe_prtt = fe_prtb + fe_size
self.fs_map[int(fe_prtb):int(fe_prtt)] = f.read()
# hacer registro de metadatos
self.registerFile(fe,i_lastcluster+1)
f.close()
sucess = True
if not sucess:
print("cpin: " + fe + ": file too largeeee") | = self.sb.size_cluster + self.sb.size_dentry*i.numdir
self.fs_map[prtb:prtb + i.offset_fname] = bytes(self.dentry_notused).encode('utf-8')
| conditional_block |
fiUnam.py | # -*- coding: utf-8 -*-
import mmap
import os # para conocer los metadatos de archivos a ingresar con la fx cpin()
import math
import os.path, time
class | :
"""
El superbloque para este sistema de archivos ocupa el primer cluster
del mismo, es decir, ocupa 2048
"""
f = open('fiunamfs.img','r+b')
fs_map = mmap.mmap(f.fileno(),0,access=mmap.ACCESS_READ)
# Información de superbloque
name = fs_map[0:8].decode('utf-8') # FiUnamFS
version = fs_map[10:13].decode('utf-8') # 0.4
tagv = fs_map[20:35].decode('utf-8') # Mi Sistema
size_cluster = int(fs_map[40:45].decode('utf-8')) # 2048
numdir_cluster = int(fs_map[47:49].decode('utf-8')) # 04
total_cluster = int(fs_map[52:60].decode('utf-8')) # 00000720
size_dentry = 64 # size dir entry
f.close()
fs_map.close()
class DIRENTRY :
"""
De hecho, estrictamente esta clase no es un inode ya que estamos
guardando el nombre del archivo en él y eso no pasa en los verdaderos
inodes y obviamente tampoco estamos guardando
permisos ni propietarios porque NO los tenemos
"""
offset_fname = 15
offset_fsize = 8
offset_fcluster = 5
offset_fcreated = 14
offset_fmodif = 14
fname = "" # 0-15
fsize = 0 # 16-24
finit_cluster = 0 # 25-30
fcreated = "" # 31-45
fmodif = "" # 46-60
numdir = -1 # numero entre 0-63
# por las especificaciones
def __init__(self, dir_entry):
self.fname = dir_entry[0:15].decode('utf-8').lstrip()
self.fsize = int(dir_entry[16:24].decode('utf-8'))
self.finit_cluster = int(dir_entry[25:30].decode('utf-8'))
self.fcreated = dir_entry[31:45].decode('utf-8')
self.fmodif = dir_entry[46:60].decode('utf-8')
class FIFS:
f = open('fiunamfs.img','a+b')
fs_map = mmap.mmap(f.fileno(),0,access=mmap.ACCESS_WRITE)
sb = SuperBlock()
dentry_notused ='Xx.xXx.xXx.xXx.'
# Función interna
def inodes(self):
# usamos del 1-4 clusters, es decir 2048*4 = 4096
# las entradas miden 64 por lo tanto 4096/64 = 128, entonces el rango
# del for 0-128
inodes = []
for j in range(0,128):
# El directorio se encuentra en los cluster de 1-4 y cada cluster
# mide 2048 por lo tanto debemo ir en 2048, el cluser 0 es el
# superblock
prtb = self.sb.size_cluster + j*self.sb.size_dentry
i = DIRENTRY(self.fs_map[prtb:prtb + self.sb.size_dentry])
if self.dentry_notused != i.fname:
i.numdir = j
inodes.append(i)
return inodes
def search(self,fe):
for j in range(0,128):
prtb = self.sb.size_cluster + j*self.sb.size_dentry
i = DIRENTRY(self.fs_map[prtb:prtb + self.sb.size_dentry])
if fe == i.fname:
i.numdir = j
return i
return None
def registerFile(self,fe,cluster):
for j in range(0,128):
prtb = self.sb.size_cluster + j*self.sb.size_dentry
i = DIRENTRY(self.fs_map[prtb:prtb + self.sb.size_dentry])
if self.dentry_notused == i.fname:
# tener cuidado con longitud de nombres
spaces = i.offset_fname - len(fe)
self.fs_map[prtb:prtb + i.offset_fname] = bytes(fe.rjust(len(fe)+spaces)).encode('utf-8')
fe_size = str(os.stat(fe).st_size)
size_zeros = i.offset_fsize - len(fe_size)
new_ptrb = prtb + i.offset_fname + 1
self.fs_map[new_ptrb :new_ptrb + i.offset_fsize] = bytes(fe_size.zfill(len(fe_size)+size_zeros)).encode('utf-8')
fe_cluster = str(cluster)
cluster_zeros = i.offset_fcluster - len(fe_cluster)
new_ptrb += i.offset_fsize + 1
self.fs_map[new_ptrb:new_ptrb + i.offset_fcluster] = bytes(fe_cluster.zfill(len(fe_cluster)+cluster_zeros)).encode('utf-8')
fe_date_create= time.strftime('%Y%m%d%H%M%S', time.gmtime(os.path.getctime(fe)))
new_ptrb += i.offset_fcluster + 1
self.fs_map[new_ptrb:new_ptrb + i.offset_fcreated] = bytes(fe_date_create).encode('utf-8')
fe_date_modif=time.strftime('%Y%m%d%H%M%S', time.gmtime(os.path.getmtime(fe)))
new_ptrb += i.offset_fcreated+ 1
self.fs_map[new_ptrb:new_ptrb + i.offset_fmodif] = bytes(fe_date_modif).encode('utf-8')
break
def cpint(self,inode,clustdest):
ptrb=self.sb.size_cluster*inode.finit_cluster
buffer=self.fs_map[ptrb:ptrb+inode.fsize]
ptrbdest=self.sb.size_cluster*clustdest
self.fs_map[ptrbdest:ptrbdest+inode.fsize]=buffer
def close(self):
self.fs_map.close()
self.f.close()
def ls(self):
for i in self.inodes():
f=self.date_format(i.fmodif)
print("%s\t%d\t%d\t%s" %(i.fname,i.finit_cluster,i.fsize,f))
def rm(self,fe):
#Primero buscar si el archivo existe,
#si existe, perdemos la ref hacia él
i = self.search(fe)
if i is None :
print("rm: " + fe + " : No such file ")
else :
prtb = self.sb.size_cluster + self.sb.size_dentry*i.numdir
self.fs_map[prtb:prtb + i.offset_fname] = bytes(self.dentry_notused).encode('utf-8')
def date_format(self,date):
months={'01':'Jan','02':'Feb','03':'March','04':'Apr','05':'May',
'06':'Jun','07':'Jul','08':'Aug','09':'Sept','10':'Oct','11':'Nov','12':'Dec'}
a=date[0:4]
m=months.get(date[4:6])
d=date[6:8]
hh=date[8:10]
mm=date[10:12]
ss=date[12:14]
return m+'\t'+d+'\t'+a+'\t'+hh+':'+mm+':'+ss
def cpout(self,fe,dir):
#Primero buscar si el archivo existe,
#si existe, lo copiamos al directorio especificado
i = self.search(fe)
if i is None :
print("cpout: " + fe + " : No such file ")
else :
prtb = self.sb.size_cluster + self.sb.size_dentry*i.numdir
# VERIFICAR QUE EXISTA EL ARCHIVO
filecp = open(fe,"a+b")
cluster = self.sb.size_cluster*i.finit_cluster
# operacion : 2048*inicio_cluster_del_archivo_a_copiar
filecp.write(self.fs_map[cluster:cluster + i.fsize])
filecp.close()
def cpin(self,fe):
# Buscar si no hay un archivo con el nombre recibido
# Si no entonces
# buscar un lugar donde quepa el archivo
# sino hay lugar, desfragmentamos
# si despues de desfragmentar no hay lugar
# mandar error
# cargando todos los dentry que no tenga la cadena 'AQUI NO VA'
# mediante la funcion inodes()
#PRIMERO VALIDAR SI EL ARCHIVO EXISTA
if os.path.isfile(fe):
if len(fe)<15:
if self.search(fe)!=None:
print('Ya existe un archivo con el mismo nombre, renombrar')
else:
self.cp(fe)
else:
print("cpin: " + fe + ": file name too large")
else:
print("cpin: " + fe + ": file not found")
def defrag(self):
inodes=self.inodes()
if(len(inodes)!=0):
if inodes[0].finit_cluster != 5:
self.cpint(inodes[0],5)
self.over(inodes[0],5)
inodes[0].finit_cluster=5
for j in range(0,len(inodes)-1):
i_lastcluster = inodes[j].finit_cluster + math.ceil(inodes[j].fsize/self.sb.size_cluster)
self.cpint(inodes[j+1],i_lastcluster+1)
self.over(inodes[j+1],i_lastcluster+1)
inodes[j].finit_cluster=i_lastcluster+1
def over(self,inode,newcluster):
fe_cluster = str(newcluster)
cluster_zeros = inode.offset_fcluster- len(fe_cluster)
ptrb = self.sb.size_cluster + inode.numdir*self.sb.size_dentry+25
self.fs_map[ptrb:ptrb+inode.offset_fcluster]=bytes(fe_cluster.zfill(len(fe_cluster)+cluster_zeros),'utf-8')
def cp(self,fe):
inodes = self.inodes()
inodes.sort(key=lambda x: x.finit_cluster)
fe_size = os.stat(fe).st_size
fe_numclusters = math.ceil(fe_size/self.sb.size_cluster)
if len(inodes) == 0:
i_lastcluster = 4
cluster_space = self.sb.total_cluster - i_lastcluster
if fe_numclusters <= cluster_space :
f = open(fe,"rb")
fe_prtb = self.sb.size_cluster*(i_lastcluster + 1)
fe_prtt = fe_prtb + fe_size
self.fs_map[fe_prtb:fe_prtt] = f.read()
self.registerFile(fe,i_lastcluster+1)
f.close()
else:
print("cpin: " + fe + ": file too large")
elif len(inodes) == 1:
self.defrag()
i_lastcluster = inodes[0].finit_cluster + math.ceil(inodes[0].fsize/self.sb.size_cluster)
cluster_space = self.sb.total_cluster - i_lastcluster
if fe_numclusters <= cluster_space :
f = open(fe,"rb")
fe_prtb = self.sb.size_cluster*(i_lastcluster + 1)
fe_prtt = fe_prtb + fe_size
self.fs_map[fe_prtb:fe_prtt] = f.read()
self.registerFile(fe,i_lastcluster+1)
f.close()
else:
print("cpin: " + fe + ": file too large")
else:
sucess = False
for j in range(0,len(inodes)-1):
i_lastcluster = inodes[j].finit_cluster + math.ceil(inodes[j].fsize/self.sb.size_cluster)
# espacio en clusters entre archivos
cluster_space = inodes[j+1].finit_cluster - i_lastcluster
# Usando el algoritmo FIFO, guardamos nuestro archivo en el
# primer bloque en el que quepa
if fe_numclusters <= cluster_space :
f = open(fe,"rb")
# vamos a escribir en i_lastcluster + 1 nuestra info
fe_prtb = self.sb.size_cluster*(i_lastcluster + 1)
fe_prtt = fe_prtb + fe_size
self.fs_map[int(fe_prtb):int(fe_prtt)] = f.read()
# hacer registro de metadatos
self.registerFile(fe,i_lastcluster+1)
f.close()
sucess = True
break
if not sucess:
i_lastcluster = inodes[len(inodes)-1].finit_cluster + math.ceil(inodes[len(inodes)-1].fsize/self.sb.size_cluster)
cluster_space = self.sb.total_cluster - i_lastcluster
if fe_numclusters <= cluster_space :
f = open(fe,"rb")
# vamos a escribir en i_lastcluster + 1 nuestra info
fe_prtb = self.sb.size_cluster*(i_lastcluster + 1)
fe_prtt = fe_prtb + fe_size
self.fs_map[int(fe_prtb):int(fe_prtt)] = f.read()
# hacer registro de metadatos
self.registerFile(fe,i_lastcluster+1)
f.close()
sucess = True
if not sucess:
print("cpin: " + fe + ": file too largeeee") | SuperBlock | identifier_name |
fiUnam.py | # -*- coding: utf-8 -*-
import mmap
import os # para conocer los metadatos de archivos a ingresar con la fx cpin()
import math
import os.path, time
class SuperBlock :
"""
El superbloque para este sistema de archivos ocupa el primer cluster
del mismo, es decir, ocupa 2048
"""
f = open('fiunamfs.img','r+b')
fs_map = mmap.mmap(f.fileno(),0,access=mmap.ACCESS_READ)
# Información de superbloque
name = fs_map[0:8].decode('utf-8') # FiUnamFS
version = fs_map[10:13].decode('utf-8') # 0.4
tagv = fs_map[20:35].decode('utf-8') # Mi Sistema
size_cluster = int(fs_map[40:45].decode('utf-8')) # 2048
numdir_cluster = int(fs_map[47:49].decode('utf-8')) # 04
total_cluster = int(fs_map[52:60].decode('utf-8')) # 00000720
size_dentry = 64 # size dir entry
f.close()
fs_map.close()
class DIRENTRY :
"""
De hecho, estrictamente esta clase no es un inode ya que estamos
guardando el nombre del archivo en él y eso no pasa en los verdaderos
inodes y obviamente tampoco estamos guardando
permisos ni propietarios porque NO los tenemos
"""
offset_fname = 15
offset_fsize = 8
offset_fcluster = 5
offset_fcreated = 14
offset_fmodif = 14
fname = "" # 0-15
fsize = 0 # 16-24
finit_cluster = 0 # 25-30
fcreated = "" # 31-45
fmodif = "" # 46-60
numdir = -1 # numero entre 0-63
# por las especificaciones
def __init__(self, dir_entry):
self.fname = dir_entry[0:15].decode('utf-8').lstrip()
self.fsize = int(dir_entry[16:24].decode('utf-8'))
self.finit_cluster = int(dir_entry[25:30].decode('utf-8'))
self.fcreated = dir_entry[31:45].decode('utf-8')
self.fmodif = dir_entry[46:60].decode('utf-8')
class FIFS:
f = open('fiunamfs.img','a+b')
fs_map = mmap.mmap(f.fileno(),0,access=mmap.ACCESS_WRITE)
sb = SuperBlock()
dentry_notused ='Xx.xXx.xXx.xXx.'
# Función interna
def inodes(self):
# usamos del 1-4 clusters, es decir 2048*4 = 4096
# las entradas miden 64 por lo tanto 4096/64 = 128, entonces el rango
# del for 0-128
inodes = []
for j in range(0,128):
# El directorio se encuentra en los cluster de 1-4 y cada cluster
# mide 2048 por lo tanto debemo ir en 2048, el cluser 0 es el
# superblock
prtb = self.sb.size_cluster + j*self.sb.size_dentry
i = DIRENTRY(self.fs_map[prtb:prtb + self.sb.size_dentry])
if self.dentry_notused != i.fname:
i.numdir = j
inodes.append(i)
return inodes
def search(self,fe):
for j in range(0,128):
prtb = self.sb.size_cluster + j*self.sb.size_dentry
i = DIRENTRY(self.fs_map[prtb:prtb + self.sb.size_dentry])
if fe == i.fname:
i.numdir = j
return i
return None
def registerFile(self,fe,cluster):
for j in range(0,128):
prtb = self.sb.size_cluster + j*self.sb.size_dentry
i = DIRENTRY(self.fs_map[prtb:prtb + self.sb.size_dentry])
if self.dentry_notused == i.fname:
# tener cuidado con longitud de nombres
spaces = i.offset_fname - len(fe)
self.fs_map[prtb:prtb + i.offset_fname] = bytes(fe.rjust(len(fe)+spaces)).encode('utf-8')
fe_size = str(os.stat(fe).st_size)
size_zeros = i.offset_fsize - len(fe_size)
new_ptrb = prtb + i.offset_fname + 1
self.fs_map[new_ptrb :new_ptrb + i.offset_fsize] = bytes(fe_size.zfill(len(fe_size)+size_zeros)).encode('utf-8')
fe_cluster = str(cluster)
cluster_zeros = i.offset_fcluster - len(fe_cluster)
new_ptrb += i.offset_fsize + 1
self.fs_map[new_ptrb:new_ptrb + i.offset_fcluster] = bytes(fe_cluster.zfill(len(fe_cluster)+cluster_zeros)).encode('utf-8')
fe_date_create= time.strftime('%Y%m%d%H%M%S', time.gmtime(os.path.getctime(fe)))
new_ptrb += i.offset_fcluster + 1
self.fs_map[new_ptrb:new_ptrb + i.offset_fcreated] = bytes(fe_date_create).encode('utf-8')
fe_date_modif=time.strftime('%Y%m%d%H%M%S', time.gmtime(os.path.getmtime(fe)))
new_ptrb += i.offset_fcreated+ 1
self.fs_map[new_ptrb:new_ptrb + i.offset_fmodif] = bytes(fe_date_modif).encode('utf-8')
break
def cpint(self,inode,clustdest):
ptrb=self.sb.size_cluster*inode.finit_cluster
buffer=self.fs_map[ptrb:ptrb+inode.fsize]
ptrbdest=self.sb.size_cluster*clustdest
self.fs_map[ptrbdest:ptrbdest+inode.fsize]=buffer
def close(self):
self.fs_map.close()
self.f.close()
def ls(self):
for i in self.inodes():
f=self.date_format(i.fmodif)
print("%s\t%d\t%d\t%s" %(i.fname,i.finit_cluster,i.fsize,f))
def rm(self,fe):
#Primero buscar si el archivo existe,
#si existe, perdemos la ref hacia él
i = self.search(fe)
if i is None :
print("rm: " + fe + " : No such file ")
else :
prtb = self.sb.size_cluster + self.sb.size_dentry*i.numdir
self.fs_map[prtb:prtb + i.offset_fname] = bytes(self.dentry_notused).encode('utf-8')
def date_format(self,date):
months={'01':'Jan','02':'Feb','03':'March','04':'Apr','05':'May',
'06':'Jun','07':'Jul','08':'Aug','09':'Sept','10':'Oct','11':'Nov','12':'Dec'}
a=date[0:4]
m=months.get(date[4:6])
d=date[6:8]
hh=date[8:10]
mm=date[10:12]
ss=date[12:14]
return m+'\t'+d+'\t'+a+'\t'+hh+':'+mm+':'+ss
def cpout(self,fe,dir):
#Primero buscar si el archivo existe,
#si existe, lo copiamos al directorio especificado
i = self.search(fe)
if i is None :
print("cpout: " + fe + " : No such file ")
else :
prtb = self.sb.size_cluster + self.sb.size_dentry*i.numdir
# VERIFICAR QUE EXISTA EL ARCHIVO
filecp = open(fe,"a+b")
cluster = self.sb.size_cluster*i.finit_cluster
# operacion : 2048*inicio_cluster_del_archivo_a_copiar
filecp.write(self.fs_map[cluster:cluster + i.fsize])
filecp.close()
def cpin(self,fe):
# Buscar si no hay un archivo con el nombre recibido
# Si no entonces
# buscar un lugar donde quepa el archivo
# sino hay lugar, desfragmentamos
# si despues de desfragmentar no hay lugar
# mandar error
# cargando todos los dentry que no tenga la cadena 'AQUI NO VA'
# mediante la funcion inodes()
#PRIMERO VALIDAR SI EL ARCHIVO EXISTA
if o | def defrag(self):
inodes=self.inodes()
if(len(inodes)!=0):
if inodes[0].finit_cluster != 5:
self.cpint(inodes[0],5)
self.over(inodes[0],5)
inodes[0].finit_cluster=5
for j in range(0,len(inodes)-1):
i_lastcluster = inodes[j].finit_cluster + math.ceil(inodes[j].fsize/self.sb.size_cluster)
self.cpint(inodes[j+1],i_lastcluster+1)
self.over(inodes[j+1],i_lastcluster+1)
inodes[j].finit_cluster=i_lastcluster+1
def over(self,inode,newcluster):
fe_cluster = str(newcluster)
cluster_zeros = inode.offset_fcluster- len(fe_cluster)
ptrb = self.sb.size_cluster + inode.numdir*self.sb.size_dentry+25
self.fs_map[ptrb:ptrb+inode.offset_fcluster]=bytes(fe_cluster.zfill(len(fe_cluster)+cluster_zeros),'utf-8')
def cp(self,fe):
inodes = self.inodes()
inodes.sort(key=lambda x: x.finit_cluster)
fe_size = os.stat(fe).st_size
fe_numclusters = math.ceil(fe_size/self.sb.size_cluster)
if len(inodes) == 0:
i_lastcluster = 4
cluster_space = self.sb.total_cluster - i_lastcluster
if fe_numclusters <= cluster_space :
f = open(fe,"rb")
fe_prtb = self.sb.size_cluster*(i_lastcluster + 1)
fe_prtt = fe_prtb + fe_size
self.fs_map[fe_prtb:fe_prtt] = f.read()
self.registerFile(fe,i_lastcluster+1)
f.close()
else:
print("cpin: " + fe + ": file too large")
elif len(inodes) == 1:
self.defrag()
i_lastcluster = inodes[0].finit_cluster + math.ceil(inodes[0].fsize/self.sb.size_cluster)
cluster_space = self.sb.total_cluster - i_lastcluster
if fe_numclusters <= cluster_space :
f = open(fe,"rb")
fe_prtb = self.sb.size_cluster*(i_lastcluster + 1)
fe_prtt = fe_prtb + fe_size
self.fs_map[fe_prtb:fe_prtt] = f.read()
self.registerFile(fe,i_lastcluster+1)
f.close()
else:
print("cpin: " + fe + ": file too large")
else:
sucess = False
for j in range(0,len(inodes)-1):
i_lastcluster = inodes[j].finit_cluster + math.ceil(inodes[j].fsize/self.sb.size_cluster)
# espacio en clusters entre archivos
cluster_space = inodes[j+1].finit_cluster - i_lastcluster
# Usando el algoritmo FIFO, guardamos nuestro archivo en el
# primer bloque en el que quepa
if fe_numclusters <= cluster_space :
f = open(fe,"rb")
# vamos a escribir en i_lastcluster + 1 nuestra info
fe_prtb = self.sb.size_cluster*(i_lastcluster + 1)
fe_prtt = fe_prtb + fe_size
self.fs_map[int(fe_prtb):int(fe_prtt)] = f.read()
# hacer registro de metadatos
self.registerFile(fe,i_lastcluster+1)
f.close()
sucess = True
break
if not sucess:
i_lastcluster = inodes[len(inodes)-1].finit_cluster + math.ceil(inodes[len(inodes)-1].fsize/self.sb.size_cluster)
cluster_space = self.sb.total_cluster - i_lastcluster
if fe_numclusters <= cluster_space :
f = open(fe,"rb")
# vamos a escribir en i_lastcluster + 1 nuestra info
fe_prtb = self.sb.size_cluster*(i_lastcluster + 1)
fe_prtt = fe_prtb + fe_size
self.fs_map[int(fe_prtb):int(fe_prtt)] = f.read()
# hacer registro de metadatos
self.registerFile(fe,i_lastcluster+1)
f.close()
sucess = True
if not sucess:
print("cpin: " + fe + ": file too largeeee") | s.path.isfile(fe):
if len(fe)<15:
if self.search(fe)!=None:
print('Ya existe un archivo con el mismo nombre, renombrar')
else:
self.cp(fe)
else:
print("cpin: " + fe + ": file name too large")
else:
print("cpin: " + fe + ": file not found")
| identifier_body |
fiUnam.py | # -*- coding: utf-8 -*-
import mmap
import os # para conocer los metadatos de archivos a ingresar con la fx cpin()
import math
import os.path, time
class SuperBlock :
"""
El superbloque para este sistema de archivos ocupa el primer cluster
del mismo, es decir, ocupa 2048
"""
f = open('fiunamfs.img','r+b')
fs_map = mmap.mmap(f.fileno(),0,access=mmap.ACCESS_READ)
# Información de superbloque
name = fs_map[0:8].decode('utf-8') # FiUnamFS
version = fs_map[10:13].decode('utf-8') # 0.4
tagv = fs_map[20:35].decode('utf-8') # Mi Sistema
size_cluster = int(fs_map[40:45].decode('utf-8')) # 2048 | numdir_cluster = int(fs_map[47:49].decode('utf-8')) # 04
total_cluster = int(fs_map[52:60].decode('utf-8')) # 00000720
size_dentry = 64 # size dir entry
f.close()
fs_map.close()
class DIRENTRY :
"""
De hecho, estrictamente esta clase no es un inode ya que estamos
guardando el nombre del archivo en él y eso no pasa en los verdaderos
inodes y obviamente tampoco estamos guardando
permisos ni propietarios porque NO los tenemos
"""
offset_fname = 15
offset_fsize = 8
offset_fcluster = 5
offset_fcreated = 14
offset_fmodif = 14
fname = "" # 0-15
fsize = 0 # 16-24
finit_cluster = 0 # 25-30
fcreated = "" # 31-45
fmodif = "" # 46-60
numdir = -1 # numero entre 0-63
# por las especificaciones
def __init__(self, dir_entry):
self.fname = dir_entry[0:15].decode('utf-8').lstrip()
self.fsize = int(dir_entry[16:24].decode('utf-8'))
self.finit_cluster = int(dir_entry[25:30].decode('utf-8'))
self.fcreated = dir_entry[31:45].decode('utf-8')
self.fmodif = dir_entry[46:60].decode('utf-8')
class FIFS:
f = open('fiunamfs.img','a+b')
fs_map = mmap.mmap(f.fileno(),0,access=mmap.ACCESS_WRITE)
sb = SuperBlock()
dentry_notused ='Xx.xXx.xXx.xXx.'
# Función interna
def inodes(self):
# usamos del 1-4 clusters, es decir 2048*4 = 4096
# las entradas miden 64 por lo tanto 4096/64 = 128, entonces el rango
# del for 0-128
inodes = []
for j in range(0,128):
# El directorio se encuentra en los cluster de 1-4 y cada cluster
# mide 2048 por lo tanto debemo ir en 2048, el cluser 0 es el
# superblock
prtb = self.sb.size_cluster + j*self.sb.size_dentry
i = DIRENTRY(self.fs_map[prtb:prtb + self.sb.size_dentry])
if self.dentry_notused != i.fname:
i.numdir = j
inodes.append(i)
return inodes
def search(self,fe):
for j in range(0,128):
prtb = self.sb.size_cluster + j*self.sb.size_dentry
i = DIRENTRY(self.fs_map[prtb:prtb + self.sb.size_dentry])
if fe == i.fname:
i.numdir = j
return i
return None
def registerFile(self,fe,cluster):
for j in range(0,128):
prtb = self.sb.size_cluster + j*self.sb.size_dentry
i = DIRENTRY(self.fs_map[prtb:prtb + self.sb.size_dentry])
if self.dentry_notused == i.fname:
# tener cuidado con longitud de nombres
spaces = i.offset_fname - len(fe)
self.fs_map[prtb:prtb + i.offset_fname] = bytes(fe.rjust(len(fe)+spaces)).encode('utf-8')
fe_size = str(os.stat(fe).st_size)
size_zeros = i.offset_fsize - len(fe_size)
new_ptrb = prtb + i.offset_fname + 1
self.fs_map[new_ptrb :new_ptrb + i.offset_fsize] = bytes(fe_size.zfill(len(fe_size)+size_zeros)).encode('utf-8')
fe_cluster = str(cluster)
cluster_zeros = i.offset_fcluster - len(fe_cluster)
new_ptrb += i.offset_fsize + 1
self.fs_map[new_ptrb:new_ptrb + i.offset_fcluster] = bytes(fe_cluster.zfill(len(fe_cluster)+cluster_zeros)).encode('utf-8')
fe_date_create= time.strftime('%Y%m%d%H%M%S', time.gmtime(os.path.getctime(fe)))
new_ptrb += i.offset_fcluster + 1
self.fs_map[new_ptrb:new_ptrb + i.offset_fcreated] = bytes(fe_date_create).encode('utf-8')
fe_date_modif=time.strftime('%Y%m%d%H%M%S', time.gmtime(os.path.getmtime(fe)))
new_ptrb += i.offset_fcreated+ 1
self.fs_map[new_ptrb:new_ptrb + i.offset_fmodif] = bytes(fe_date_modif).encode('utf-8')
break
def cpint(self,inode,clustdest):
ptrb=self.sb.size_cluster*inode.finit_cluster
buffer=self.fs_map[ptrb:ptrb+inode.fsize]
ptrbdest=self.sb.size_cluster*clustdest
self.fs_map[ptrbdest:ptrbdest+inode.fsize]=buffer
def close(self):
self.fs_map.close()
self.f.close()
def ls(self):
for i in self.inodes():
f=self.date_format(i.fmodif)
print("%s\t%d\t%d\t%s" %(i.fname,i.finit_cluster,i.fsize,f))
def rm(self,fe):
#Primero buscar si el archivo existe,
#si existe, perdemos la ref hacia él
i = self.search(fe)
if i is None :
print("rm: " + fe + " : No such file ")
else :
prtb = self.sb.size_cluster + self.sb.size_dentry*i.numdir
self.fs_map[prtb:prtb + i.offset_fname] = bytes(self.dentry_notused).encode('utf-8')
def date_format(self,date):
months={'01':'Jan','02':'Feb','03':'March','04':'Apr','05':'May',
'06':'Jun','07':'Jul','08':'Aug','09':'Sept','10':'Oct','11':'Nov','12':'Dec'}
a=date[0:4]
m=months.get(date[4:6])
d=date[6:8]
hh=date[8:10]
mm=date[10:12]
ss=date[12:14]
return m+'\t'+d+'\t'+a+'\t'+hh+':'+mm+':'+ss
def cpout(self,fe,dir):
#Primero buscar si el archivo existe,
#si existe, lo copiamos al directorio especificado
i = self.search(fe)
if i is None :
print("cpout: " + fe + " : No such file ")
else :
prtb = self.sb.size_cluster + self.sb.size_dentry*i.numdir
# VERIFICAR QUE EXISTA EL ARCHIVO
filecp = open(fe,"a+b")
cluster = self.sb.size_cluster*i.finit_cluster
# operacion : 2048*inicio_cluster_del_archivo_a_copiar
filecp.write(self.fs_map[cluster:cluster + i.fsize])
filecp.close()
def cpin(self,fe):
# Buscar si no hay un archivo con el nombre recibido
# Si no entonces
# buscar un lugar donde quepa el archivo
# sino hay lugar, desfragmentamos
# si despues de desfragmentar no hay lugar
# mandar error
# cargando todos los dentry que no tenga la cadena 'AQUI NO VA'
# mediante la funcion inodes()
#PRIMERO VALIDAR SI EL ARCHIVO EXISTA
if os.path.isfile(fe):
if len(fe)<15:
if self.search(fe)!=None:
print('Ya existe un archivo con el mismo nombre, renombrar')
else:
self.cp(fe)
else:
print("cpin: " + fe + ": file name too large")
else:
print("cpin: " + fe + ": file not found")
def defrag(self):
inodes=self.inodes()
if(len(inodes)!=0):
if inodes[0].finit_cluster != 5:
self.cpint(inodes[0],5)
self.over(inodes[0],5)
inodes[0].finit_cluster=5
for j in range(0,len(inodes)-1):
i_lastcluster = inodes[j].finit_cluster + math.ceil(inodes[j].fsize/self.sb.size_cluster)
self.cpint(inodes[j+1],i_lastcluster+1)
self.over(inodes[j+1],i_lastcluster+1)
inodes[j].finit_cluster=i_lastcluster+1
def over(self,inode,newcluster):
fe_cluster = str(newcluster)
cluster_zeros = inode.offset_fcluster- len(fe_cluster)
ptrb = self.sb.size_cluster + inode.numdir*self.sb.size_dentry+25
self.fs_map[ptrb:ptrb+inode.offset_fcluster]=bytes(fe_cluster.zfill(len(fe_cluster)+cluster_zeros),'utf-8')
def cp(self,fe):
inodes = self.inodes()
inodes.sort(key=lambda x: x.finit_cluster)
fe_size = os.stat(fe).st_size
fe_numclusters = math.ceil(fe_size/self.sb.size_cluster)
if len(inodes) == 0:
i_lastcluster = 4
cluster_space = self.sb.total_cluster - i_lastcluster
if fe_numclusters <= cluster_space :
f = open(fe,"rb")
fe_prtb = self.sb.size_cluster*(i_lastcluster + 1)
fe_prtt = fe_prtb + fe_size
self.fs_map[fe_prtb:fe_prtt] = f.read()
self.registerFile(fe,i_lastcluster+1)
f.close()
else:
print("cpin: " + fe + ": file too large")
elif len(inodes) == 1:
self.defrag()
i_lastcluster = inodes[0].finit_cluster + math.ceil(inodes[0].fsize/self.sb.size_cluster)
cluster_space = self.sb.total_cluster - i_lastcluster
if fe_numclusters <= cluster_space :
f = open(fe,"rb")
fe_prtb = self.sb.size_cluster*(i_lastcluster + 1)
fe_prtt = fe_prtb + fe_size
self.fs_map[fe_prtb:fe_prtt] = f.read()
self.registerFile(fe,i_lastcluster+1)
f.close()
else:
print("cpin: " + fe + ": file too large")
else:
sucess = False
for j in range(0,len(inodes)-1):
i_lastcluster = inodes[j].finit_cluster + math.ceil(inodes[j].fsize/self.sb.size_cluster)
# espacio en clusters entre archivos
cluster_space = inodes[j+1].finit_cluster - i_lastcluster
# Usando el algoritmo FIFO, guardamos nuestro archivo en el
# primer bloque en el que quepa
if fe_numclusters <= cluster_space :
f = open(fe,"rb")
# vamos a escribir en i_lastcluster + 1 nuestra info
fe_prtb = self.sb.size_cluster*(i_lastcluster + 1)
fe_prtt = fe_prtb + fe_size
self.fs_map[int(fe_prtb):int(fe_prtt)] = f.read()
# hacer registro de metadatos
self.registerFile(fe,i_lastcluster+1)
f.close()
sucess = True
break
if not sucess:
i_lastcluster = inodes[len(inodes)-1].finit_cluster + math.ceil(inodes[len(inodes)-1].fsize/self.sb.size_cluster)
cluster_space = self.sb.total_cluster - i_lastcluster
if fe_numclusters <= cluster_space :
f = open(fe,"rb")
# vamos a escribir en i_lastcluster + 1 nuestra info
fe_prtb = self.sb.size_cluster*(i_lastcluster + 1)
fe_prtt = fe_prtb + fe_size
self.fs_map[int(fe_prtb):int(fe_prtt)] = f.read()
# hacer registro de metadatos
self.registerFile(fe,i_lastcluster+1)
f.close()
sucess = True
if not sucess:
print("cpin: " + fe + ": file too largeeee") | random_line_split | |
boilerplate.py | #-*- coding: utf8 -*-
#credits to https://github.com/pytorch/examples/blob/master/imagenet/main.py
import shutil, time, logging
import torch
import torch.optim
import numpy as np
import visdom, copy
from datetime import datetime
from collections import defaultdict
from generic_models.yellowfin import YFOptimizer
logger = logging.getLogger('app')
logger.setLevel(logging.DEBUG)
class VisdomMonitor(object):
def __init__(self, prefix=None, server='http://localhost', port=8097):
self.__prefix = prefix or datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')
self.__vis = visdom.Visdom(server=server, port=port)
self.__metrics = defaultdict(lambda :defaultdict(list))
self.__win_dict = {}
self.__opts = self._init_opts()
def _init_opts(self):
opts = dict(legend=['Train', 'Validate'])
return opts
def __add(self, name, value, type):
self.__metrics[type][name].append(value)
def _add_val_performance(self, name, value):
self.__add(name, value, type='val')
def _add_train_performance(self, name, value):
self.__add(name, value, type='train')
def add_performance(self, metric_name, train_value, val_value):
self._add_train_performance(metric_name, train_value )
self._add_val_performance(metric_name, val_value)
self.plot(metric_name)
def plot(self, metric_name):
|
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate_by_schedule(config, optimizer, epoch, decrease_rate=0.1):
"""Sets the learning rate to the initial LR decayed by 1/decrease_rate every 10 epochs"""
if not isinstance(optimizer, torch.optim.SGD):
return
#lr = config.lr * (0.1 ** (epoch // 10))
if epoch and epoch % 10 == 0:
for i, param_group in enumerate(optimizer.param_groups):
param_group['lr'] *= decrease_rate
logger.info('Setting learning layer=i, rate=%.6f', i, param_group['lr'])
class PlateauScheduler(object):
"""Sets the lr to the initial LR decayed by 1/decrease_rate, when not improving for max_stops epochs"""
def __init__(self, optimizer, patience, early_stop_n, decrease_rate=0.1, eps=1e-5,
warm_up_epochs=None, best_score=None):
self.optimizer = optimizer
if not isinstance(optimizer, (torch.optim.SGD, YFOptimizer)):
raise TypeError
self.patience = patience
self.early_stop_n = early_stop_n
self.decrease_rate = decrease_rate
self.eps = eps
self.warm_up_epochs = warm_up_epochs
self.__lr_changed = 0
self.__early_stop_counter = 0
self.__best_score = best_score
self.__descrease_times = 0
self.__warm_up = self.__has_warm_up(optimizer)
def __has_warm_up(self, optimizer):
for param_group in self.optimizer.param_groups:
if param_group['lr'] != param_group['after_warmup_lr']:
logger.info('Optimizer has warm-up stage')
return True
def step(self, epoch, score):
adjusted, to_break = False, False
prev_best_score = self.__best_score or -1
is_best = self.__best_score is None or score < self.__best_score - self.eps
self.__best_score = self.__best_score is not None and min(score, self.__best_score) or score
if is_best:
logger.info('Current model is best by val score %.5f < %.5f' % (self.__best_score, prev_best_score))
self.__early_stop_counter = 0
else:
self.__early_stop_counter += 1
if self.__early_stop_counter >= self.early_stop_n:
logger.info('Early stopping, regress for %d iterations', self.__early_stop_counter)
to_break = True
logger.info('early_stop_counter: %d', self.__early_stop_counter)
if (self.warm_up_epochs and self.__descrease_times == 0 and self.__warm_up and epoch >= self.warm_up_epochs - 1 ) or \
(self.__lr_changed <= epoch - self.patience and \
(self.__early_stop_counter is not None and self.patience and self.__early_stop_counter >= self.patience)):
self.__lr_changed = epoch
for param_group in self.optimizer.param_groups:
if self.__descrease_times == 0 and self.__warm_up:
param_group['lr'] = param_group['after_warmup_lr']
else:
param_group['lr'] = param_group['lr'] * self.decrease_rate
logger.info('Setting for group learning rate=%.8f, epoch=%d', param_group['lr'], self.__lr_changed)
adjusted = True
self.__descrease_times += 1
return adjusted, to_break, is_best
def init_optimizer(model, config, exact_layers=None):
"""param 'exact_layers' specifies which parameters of the model to train, None - all,
else - list of layers with a multiplier (optional) for LR schedule"""
opt_type = config.optimizer
if exact_layers:
logger.info('Learning exact layers, number=%d', len(exact_layers))
parameters = []
for i, layer in enumerate(exact_layers):
if isinstance(layer, tuple) and len(layer) == 2:
layer, multiplier = layer
init_multiplier = 1
elif isinstance(layer, tuple) and len(layer) == 3:
layer, init_multiplier, multiplier = layer
else:
multiplier = 1
init_multiplier = 1
lr = config.lr * multiplier
init_lr = config.lr * multiplier * init_multiplier
logger.info('Layer=%d, lr=%.5f', i, init_lr)
parameters.append({'params': layer.parameters(), 'lr': init_lr, 'after_warmup_lr': lr})
else:
logger.info('Optimizing all parameters, lr=%.5f', config.lr)
parameters = model.parameters()
if opt_type == 'sgd':
optimizer = torch.optim.SGD(parameters, config.lr, momentum=config.momentum, weight_decay=config.weight_decay)
elif opt_type == 'adam':
optimizer = torch.optim.Adam(parameters, lr=config.lr, weight_decay=config.weight_decay)
elif opt_type == 'yf':
optimizer = YFOptimizer(parameters, config.lr, mu=config.momentum, weight_decay=config.weight_decay,
clip_thresh=0.1)
else:
raise TypeError, 'Unknown optimizer type=%s' % (opt_type, )
return optimizer
def save_checkpoint(state, epoch, is_best, filename, best_filename):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, best_filename)
shutil.copyfile(filename, best_filename + '-%d' % epoch)
def load_checkpoint(filename):
checkpoint = torch.load(filename)
return checkpoint
def train(train_loader, model, criterion, optimizer, epoch, is_multi_fc=False):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
predictions = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
# compute output
if is_multi_fc==False:
# this is original loss function
output = model(input_var)
loss = criterion(output, target_var)
else:
# this is for inception_v3 with 2 output channels
# https://github.com/pytorch/vision/issues/302
output, output_aux = model(input_var)
loss = criterion(output, target_var)
loss+= criterion(output_aux, target_var)
# measure accuracy and record loss
losses.update(loss.data[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if (i and i % 50 == 0) or i == len(train_loader) - 1:
logger.info('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Accuracy {acc.val:.4f} ({acc.avg:.4f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, acc=predictions))
return losses.avg
def compute_f2(output, target):
true_and_pred = target * output
ttp_sum = torch.sum(true_and_pred, 1)
tpred_sum = torch.sum(output, 1)
ttrue_sum = torch.sum(target, 1)
tprecision = ttp_sum / tpred_sum
trecall = ttp_sum / ttrue_sum
f2 = ((1 + 4) * tprecision * trecall) / (4 * tprecision + trecall)
return f2
def validate(val_loader, model, criterion, activation=torch.sigmoid):
logger.info('Validating model')
batch_time = AverageMeter()
losses = AverageMeter()
f2s = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# compute f2
f2 = compute_f2(activation(output), target_var).mean()
f2s.update(f2.data[0], input.size(0))
# measure accuracy and record loss
losses.update(loss.data[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
logger.info('Test: [{0}/{0}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.avg:.5f}\t'
'F2: {f2s.avg}\t'.format(
len(val_loader), batch_time=batch_time, loss=losses, f2s=f2s))
return losses.avg
def get_outputs(loader, model, activation):
model.eval()
outputs, targets = [], []
for i, (input, target) in enumerate(loader):
input_var = torch.autograd.Variable(input, volatile=True)
output = model(input_var)
if activation is not None:
output = activation(output)
outputs.extend(output.cpu().data)
targets.extend(target)
return outputs, targets
def test_model(test_loader, model, activation=None):
logger.info('Testing')
model.eval()
names, results = [], []
for i, (input, name_batch) in enumerate(test_loader):
input_var = torch.autograd.Variable(input, volatile=True)
output = model(input_var)
if activation is not None:
output = activation(output)
names.extend(name_batch)
results.extend(output.cpu())
if i and i % 20 == 0:
logger.info('Batch %d',i)
return names, results
| current_win = self.__win_dict.get(metric_name, None)
train_values = self.__metrics['train'][metric_name]
val_values = self.__metrics['val'][metric_name]
epochs = max(len(train_values), len(val_values))
values_for_plot = np.column_stack((np.array(train_values), np.array(val_values)))
opts = copy.deepcopy(self.__opts)
opts.update(dict(title='%s\ntrain/val %s' % (self.__prefix, metric_name)))
win = self.__vis.line(Y=values_for_plot, X=np.arange(epochs), opts=opts, win=current_win)
if current_win is None:
self.__win_dict[metric_name] = win | identifier_body |
boilerplate.py | #-*- coding: utf8 -*-
#credits to https://github.com/pytorch/examples/blob/master/imagenet/main.py
import shutil, time, logging
import torch
import torch.optim
import numpy as np
import visdom, copy
from datetime import datetime
from collections import defaultdict
from generic_models.yellowfin import YFOptimizer
logger = logging.getLogger('app')
logger.setLevel(logging.DEBUG)
class VisdomMonitor(object):
def __init__(self, prefix=None, server='http://localhost', port=8097):
self.__prefix = prefix or datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')
self.__vis = visdom.Visdom(server=server, port=port)
self.__metrics = defaultdict(lambda :defaultdict(list))
self.__win_dict = {}
self.__opts = self._init_opts()
def _init_opts(self):
opts = dict(legend=['Train', 'Validate'])
return opts
def __add(self, name, value, type):
self.__metrics[type][name].append(value)
def _add_val_performance(self, name, value):
self.__add(name, value, type='val')
def _add_train_performance(self, name, value):
self.__add(name, value, type='train')
def add_performance(self, metric_name, train_value, val_value):
self._add_train_performance(metric_name, train_value )
self._add_val_performance(metric_name, val_value)
self.plot(metric_name)
def plot(self, metric_name):
current_win = self.__win_dict.get(metric_name, None)
train_values = self.__metrics['train'][metric_name]
val_values = self.__metrics['val'][metric_name]
epochs = max(len(train_values), len(val_values))
values_for_plot = np.column_stack((np.array(train_values), np.array(val_values)))
opts = copy.deepcopy(self.__opts)
opts.update(dict(title='%s\ntrain/val %s' % (self.__prefix, metric_name)))
win = self.__vis.line(Y=values_for_plot, X=np.arange(epochs), opts=opts, win=current_win)
if current_win is None:
self.__win_dict[metric_name] = win
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate_by_schedule(config, optimizer, epoch, decrease_rate=0.1):
"""Sets the learning rate to the initial LR decayed by 1/decrease_rate every 10 epochs"""
if not isinstance(optimizer, torch.optim.SGD):
return
#lr = config.lr * (0.1 ** (epoch // 10))
if epoch and epoch % 10 == 0:
for i, param_group in enumerate(optimizer.param_groups):
param_group['lr'] *= decrease_rate
logger.info('Setting learning layer=i, rate=%.6f', i, param_group['lr'])
class PlateauScheduler(object):
"""Sets the lr to the initial LR decayed by 1/decrease_rate, when not improving for max_stops epochs"""
def __init__(self, optimizer, patience, early_stop_n, decrease_rate=0.1, eps=1e-5,
warm_up_epochs=None, best_score=None):
self.optimizer = optimizer
if not isinstance(optimizer, (torch.optim.SGD, YFOptimizer)):
raise TypeError
self.patience = patience
self.early_stop_n = early_stop_n
self.decrease_rate = decrease_rate
self.eps = eps
self.warm_up_epochs = warm_up_epochs
self.__lr_changed = 0
self.__early_stop_counter = 0
self.__best_score = best_score
self.__descrease_times = 0
self.__warm_up = self.__has_warm_up(optimizer)
def __has_warm_up(self, optimizer):
for param_group in self.optimizer.param_groups:
if param_group['lr'] != param_group['after_warmup_lr']:
logger.info('Optimizer has warm-up stage')
return True
def step(self, epoch, score):
adjusted, to_break = False, False
prev_best_score = self.__best_score or -1
is_best = self.__best_score is None or score < self.__best_score - self.eps
self.__best_score = self.__best_score is not None and min(score, self.__best_score) or score
if is_best:
logger.info('Current model is best by val score %.5f < %.5f' % (self.__best_score, prev_best_score))
self.__early_stop_counter = 0
else:
self.__early_stop_counter += 1
if self.__early_stop_counter >= self.early_stop_n:
logger.info('Early stopping, regress for %d iterations', self.__early_stop_counter)
to_break = True
logger.info('early_stop_counter: %d', self.__early_stop_counter)
if (self.warm_up_epochs and self.__descrease_times == 0 and self.__warm_up and epoch >= self.warm_up_epochs - 1 ) or \
(self.__lr_changed <= epoch - self.patience and \
(self.__early_stop_counter is not None and self.patience and self.__early_stop_counter >= self.patience)):
self.__lr_changed = epoch
for param_group in self.optimizer.param_groups:
if self.__descrease_times == 0 and self.__warm_up:
param_group['lr'] = param_group['after_warmup_lr']
else:
param_group['lr'] = param_group['lr'] * self.decrease_rate
logger.info('Setting for group learning rate=%.8f, epoch=%d', param_group['lr'], self.__lr_changed)
adjusted = True
self.__descrease_times += 1
return adjusted, to_break, is_best
def init_optimizer(model, config, exact_layers=None):
"""param 'exact_layers' specifies which parameters of the model to train, None - all,
else - list of layers with a multiplier (optional) for LR schedule"""
opt_type = config.optimizer
if exact_layers:
logger.info('Learning exact layers, number=%d', len(exact_layers))
parameters = []
for i, layer in enumerate(exact_layers):
if isinstance(layer, tuple) and len(layer) == 2:
layer, multiplier = layer
init_multiplier = 1
elif isinstance(layer, tuple) and len(layer) == 3:
layer, init_multiplier, multiplier = layer
else:
multiplier = 1
init_multiplier = 1
lr = config.lr * multiplier
init_lr = config.lr * multiplier * init_multiplier
logger.info('Layer=%d, lr=%.5f', i, init_lr)
parameters.append({'params': layer.parameters(), 'lr': init_lr, 'after_warmup_lr': lr})
else:
logger.info('Optimizing all parameters, lr=%.5f', config.lr)
parameters = model.parameters()
if opt_type == 'sgd':
optimizer = torch.optim.SGD(parameters, config.lr, momentum=config.momentum, weight_decay=config.weight_decay)
elif opt_type == 'adam':
optimizer = torch.optim.Adam(parameters, lr=config.lr, weight_decay=config.weight_decay)
elif opt_type == 'yf':
optimizer = YFOptimizer(parameters, config.lr, mu=config.momentum, weight_decay=config.weight_decay,
clip_thresh=0.1)
else:
raise TypeError, 'Unknown optimizer type=%s' % (opt_type, )
return optimizer
def save_checkpoint(state, epoch, is_best, filename, best_filename):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, best_filename)
shutil.copyfile(filename, best_filename + '-%d' % epoch)
def load_checkpoint(filename):
checkpoint = torch.load(filename)
return checkpoint
def train(train_loader, model, criterion, optimizer, epoch, is_multi_fc=False):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
predictions = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
# compute output
if is_multi_fc==False:
# this is original loss function
output = model(input_var)
loss = criterion(output, target_var)
else:
# this is for inception_v3 with 2 output channels
# https://github.com/pytorch/vision/issues/302
output, output_aux = model(input_var)
loss = criterion(output, target_var)
loss+= criterion(output_aux, target_var)
# measure accuracy and record loss
losses.update(loss.data[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if (i and i % 50 == 0) or i == len(train_loader) - 1:
logger.info('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Accuracy {acc.val:.4f} ({acc.avg:.4f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, acc=predictions))
return losses.avg
def compute_f2(output, target):
true_and_pred = target * output
ttp_sum = torch.sum(true_and_pred, 1)
tpred_sum = torch.sum(output, 1)
ttrue_sum = torch.sum(target, 1)
tprecision = ttp_sum / tpred_sum
trecall = ttp_sum / ttrue_sum
f2 = ((1 + 4) * tprecision * trecall) / (4 * tprecision + trecall)
return f2
def validate(val_loader, model, criterion, activation=torch.sigmoid):
logger.info('Validating model')
batch_time = AverageMeter()
losses = AverageMeter()
f2s = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# compute f2
f2 = compute_f2(activation(output), target_var).mean()
f2s.update(f2.data[0], input.size(0))
# measure accuracy and record loss
losses.update(loss.data[0], input.size(0))
# measure elapsed time | end = time.time()
logger.info('Test: [{0}/{0}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.avg:.5f}\t'
'F2: {f2s.avg}\t'.format(
len(val_loader), batch_time=batch_time, loss=losses, f2s=f2s))
return losses.avg
def get_outputs(loader, model, activation):
model.eval()
outputs, targets = [], []
for i, (input, target) in enumerate(loader):
input_var = torch.autograd.Variable(input, volatile=True)
output = model(input_var)
if activation is not None:
output = activation(output)
outputs.extend(output.cpu().data)
targets.extend(target)
return outputs, targets
def test_model(test_loader, model, activation=None):
logger.info('Testing')
model.eval()
names, results = [], []
for i, (input, name_batch) in enumerate(test_loader):
input_var = torch.autograd.Variable(input, volatile=True)
output = model(input_var)
if activation is not None:
output = activation(output)
names.extend(name_batch)
results.extend(output.cpu())
if i and i % 20 == 0:
logger.info('Batch %d',i)
return names, results | batch_time.update(time.time() - end) | random_line_split |
boilerplate.py | #-*- coding: utf8 -*-
#credits to https://github.com/pytorch/examples/blob/master/imagenet/main.py
import shutil, time, logging
import torch
import torch.optim
import numpy as np
import visdom, copy
from datetime import datetime
from collections import defaultdict
from generic_models.yellowfin import YFOptimizer
logger = logging.getLogger('app')
logger.setLevel(logging.DEBUG)
class VisdomMonitor(object):
def __init__(self, prefix=None, server='http://localhost', port=8097):
self.__prefix = prefix or datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')
self.__vis = visdom.Visdom(server=server, port=port)
self.__metrics = defaultdict(lambda :defaultdict(list))
self.__win_dict = {}
self.__opts = self._init_opts()
def _init_opts(self):
opts = dict(legend=['Train', 'Validate'])
return opts
def __add(self, name, value, type):
self.__metrics[type][name].append(value)
def _add_val_performance(self, name, value):
self.__add(name, value, type='val')
def _add_train_performance(self, name, value):
self.__add(name, value, type='train')
def add_performance(self, metric_name, train_value, val_value):
self._add_train_performance(metric_name, train_value )
self._add_val_performance(metric_name, val_value)
self.plot(metric_name)
def plot(self, metric_name):
current_win = self.__win_dict.get(metric_name, None)
train_values = self.__metrics['train'][metric_name]
val_values = self.__metrics['val'][metric_name]
epochs = max(len(train_values), len(val_values))
values_for_plot = np.column_stack((np.array(train_values), np.array(val_values)))
opts = copy.deepcopy(self.__opts)
opts.update(dict(title='%s\ntrain/val %s' % (self.__prefix, metric_name)))
win = self.__vis.line(Y=values_for_plot, X=np.arange(epochs), opts=opts, win=current_win)
if current_win is None:
self.__win_dict[metric_name] = win
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate_by_schedule(config, optimizer, epoch, decrease_rate=0.1):
"""Sets the learning rate to the initial LR decayed by 1/decrease_rate every 10 epochs"""
if not isinstance(optimizer, torch.optim.SGD):
return
#lr = config.lr * (0.1 ** (epoch // 10))
if epoch and epoch % 10 == 0:
for i, param_group in enumerate(optimizer.param_groups):
param_group['lr'] *= decrease_rate
logger.info('Setting learning layer=i, rate=%.6f', i, param_group['lr'])
class PlateauScheduler(object):
"""Sets the lr to the initial LR decayed by 1/decrease_rate, when not improving for max_stops epochs"""
def __init__(self, optimizer, patience, early_stop_n, decrease_rate=0.1, eps=1e-5,
warm_up_epochs=None, best_score=None):
self.optimizer = optimizer
if not isinstance(optimizer, (torch.optim.SGD, YFOptimizer)):
raise TypeError
self.patience = patience
self.early_stop_n = early_stop_n
self.decrease_rate = decrease_rate
self.eps = eps
self.warm_up_epochs = warm_up_epochs
self.__lr_changed = 0
self.__early_stop_counter = 0
self.__best_score = best_score
self.__descrease_times = 0
self.__warm_up = self.__has_warm_up(optimizer)
def __has_warm_up(self, optimizer):
for param_group in self.optimizer.param_groups:
if param_group['lr'] != param_group['after_warmup_lr']:
logger.info('Optimizer has warm-up stage')
return True
def step(self, epoch, score):
adjusted, to_break = False, False
prev_best_score = self.__best_score or -1
is_best = self.__best_score is None or score < self.__best_score - self.eps
self.__best_score = self.__best_score is not None and min(score, self.__best_score) or score
if is_best:
logger.info('Current model is best by val score %.5f < %.5f' % (self.__best_score, prev_best_score))
self.__early_stop_counter = 0
else:
self.__early_stop_counter += 1
if self.__early_stop_counter >= self.early_stop_n:
|
logger.info('early_stop_counter: %d', self.__early_stop_counter)
if (self.warm_up_epochs and self.__descrease_times == 0 and self.__warm_up and epoch >= self.warm_up_epochs - 1 ) or \
(self.__lr_changed <= epoch - self.patience and \
(self.__early_stop_counter is not None and self.patience and self.__early_stop_counter >= self.patience)):
self.__lr_changed = epoch
for param_group in self.optimizer.param_groups:
if self.__descrease_times == 0 and self.__warm_up:
param_group['lr'] = param_group['after_warmup_lr']
else:
param_group['lr'] = param_group['lr'] * self.decrease_rate
logger.info('Setting for group learning rate=%.8f, epoch=%d', param_group['lr'], self.__lr_changed)
adjusted = True
self.__descrease_times += 1
return adjusted, to_break, is_best
def init_optimizer(model, config, exact_layers=None):
"""param 'exact_layers' specifies which parameters of the model to train, None - all,
else - list of layers with a multiplier (optional) for LR schedule"""
opt_type = config.optimizer
if exact_layers:
logger.info('Learning exact layers, number=%d', len(exact_layers))
parameters = []
for i, layer in enumerate(exact_layers):
if isinstance(layer, tuple) and len(layer) == 2:
layer, multiplier = layer
init_multiplier = 1
elif isinstance(layer, tuple) and len(layer) == 3:
layer, init_multiplier, multiplier = layer
else:
multiplier = 1
init_multiplier = 1
lr = config.lr * multiplier
init_lr = config.lr * multiplier * init_multiplier
logger.info('Layer=%d, lr=%.5f', i, init_lr)
parameters.append({'params': layer.parameters(), 'lr': init_lr, 'after_warmup_lr': lr})
else:
logger.info('Optimizing all parameters, lr=%.5f', config.lr)
parameters = model.parameters()
if opt_type == 'sgd':
optimizer = torch.optim.SGD(parameters, config.lr, momentum=config.momentum, weight_decay=config.weight_decay)
elif opt_type == 'adam':
optimizer = torch.optim.Adam(parameters, lr=config.lr, weight_decay=config.weight_decay)
elif opt_type == 'yf':
optimizer = YFOptimizer(parameters, config.lr, mu=config.momentum, weight_decay=config.weight_decay,
clip_thresh=0.1)
else:
raise TypeError, 'Unknown optimizer type=%s' % (opt_type, )
return optimizer
def save_checkpoint(state, epoch, is_best, filename, best_filename):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, best_filename)
shutil.copyfile(filename, best_filename + '-%d' % epoch)
def load_checkpoint(filename):
checkpoint = torch.load(filename)
return checkpoint
def train(train_loader, model, criterion, optimizer, epoch, is_multi_fc=False):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
predictions = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
# compute output
if is_multi_fc==False:
# this is original loss function
output = model(input_var)
loss = criterion(output, target_var)
else:
# this is for inception_v3 with 2 output channels
# https://github.com/pytorch/vision/issues/302
output, output_aux = model(input_var)
loss = criterion(output, target_var)
loss+= criterion(output_aux, target_var)
# measure accuracy and record loss
losses.update(loss.data[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if (i and i % 50 == 0) or i == len(train_loader) - 1:
logger.info('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Accuracy {acc.val:.4f} ({acc.avg:.4f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, acc=predictions))
return losses.avg
def compute_f2(output, target):
true_and_pred = target * output
ttp_sum = torch.sum(true_and_pred, 1)
tpred_sum = torch.sum(output, 1)
ttrue_sum = torch.sum(target, 1)
tprecision = ttp_sum / tpred_sum
trecall = ttp_sum / ttrue_sum
f2 = ((1 + 4) * tprecision * trecall) / (4 * tprecision + trecall)
return f2
def validate(val_loader, model, criterion, activation=torch.sigmoid):
logger.info('Validating model')
batch_time = AverageMeter()
losses = AverageMeter()
f2s = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# compute f2
f2 = compute_f2(activation(output), target_var).mean()
f2s.update(f2.data[0], input.size(0))
# measure accuracy and record loss
losses.update(loss.data[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
logger.info('Test: [{0}/{0}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.avg:.5f}\t'
'F2: {f2s.avg}\t'.format(
len(val_loader), batch_time=batch_time, loss=losses, f2s=f2s))
return losses.avg
def get_outputs(loader, model, activation):
model.eval()
outputs, targets = [], []
for i, (input, target) in enumerate(loader):
input_var = torch.autograd.Variable(input, volatile=True)
output = model(input_var)
if activation is not None:
output = activation(output)
outputs.extend(output.cpu().data)
targets.extend(target)
return outputs, targets
def test_model(test_loader, model, activation=None):
logger.info('Testing')
model.eval()
names, results = [], []
for i, (input, name_batch) in enumerate(test_loader):
input_var = torch.autograd.Variable(input, volatile=True)
output = model(input_var)
if activation is not None:
output = activation(output)
names.extend(name_batch)
results.extend(output.cpu())
if i and i % 20 == 0:
logger.info('Batch %d',i)
return names, results
| logger.info('Early stopping, regress for %d iterations', self.__early_stop_counter)
to_break = True | conditional_block |
boilerplate.py | #-*- coding: utf8 -*-
#credits to https://github.com/pytorch/examples/blob/master/imagenet/main.py
import shutil, time, logging
import torch
import torch.optim
import numpy as np
import visdom, copy
from datetime import datetime
from collections import defaultdict
from generic_models.yellowfin import YFOptimizer
logger = logging.getLogger('app')
logger.setLevel(logging.DEBUG)
class VisdomMonitor(object):
def __init__(self, prefix=None, server='http://localhost', port=8097):
self.__prefix = prefix or datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')
self.__vis = visdom.Visdom(server=server, port=port)
self.__metrics = defaultdict(lambda :defaultdict(list))
self.__win_dict = {}
self.__opts = self._init_opts()
def _init_opts(self):
opts = dict(legend=['Train', 'Validate'])
return opts
def __add(self, name, value, type):
self.__metrics[type][name].append(value)
def _add_val_performance(self, name, value):
self.__add(name, value, type='val')
def _add_train_performance(self, name, value):
self.__add(name, value, type='train')
def add_performance(self, metric_name, train_value, val_value):
self._add_train_performance(metric_name, train_value )
self._add_val_performance(metric_name, val_value)
self.plot(metric_name)
def plot(self, metric_name):
current_win = self.__win_dict.get(metric_name, None)
train_values = self.__metrics['train'][metric_name]
val_values = self.__metrics['val'][metric_name]
epochs = max(len(train_values), len(val_values))
values_for_plot = np.column_stack((np.array(train_values), np.array(val_values)))
opts = copy.deepcopy(self.__opts)
opts.update(dict(title='%s\ntrain/val %s' % (self.__prefix, metric_name)))
win = self.__vis.line(Y=values_for_plot, X=np.arange(epochs), opts=opts, win=current_win)
if current_win is None:
self.__win_dict[metric_name] = win
class AverageMeter(object):
"""Computes and stores the average and current value"""
def | (self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate_by_schedule(config, optimizer, epoch, decrease_rate=0.1):
"""Sets the learning rate to the initial LR decayed by 1/decrease_rate every 10 epochs"""
if not isinstance(optimizer, torch.optim.SGD):
return
#lr = config.lr * (0.1 ** (epoch // 10))
if epoch and epoch % 10 == 0:
for i, param_group in enumerate(optimizer.param_groups):
param_group['lr'] *= decrease_rate
logger.info('Setting learning layer=i, rate=%.6f', i, param_group['lr'])
class PlateauScheduler(object):
"""Sets the lr to the initial LR decayed by 1/decrease_rate, when not improving for max_stops epochs"""
def __init__(self, optimizer, patience, early_stop_n, decrease_rate=0.1, eps=1e-5,
warm_up_epochs=None, best_score=None):
self.optimizer = optimizer
if not isinstance(optimizer, (torch.optim.SGD, YFOptimizer)):
raise TypeError
self.patience = patience
self.early_stop_n = early_stop_n
self.decrease_rate = decrease_rate
self.eps = eps
self.warm_up_epochs = warm_up_epochs
self.__lr_changed = 0
self.__early_stop_counter = 0
self.__best_score = best_score
self.__descrease_times = 0
self.__warm_up = self.__has_warm_up(optimizer)
def __has_warm_up(self, optimizer):
for param_group in self.optimizer.param_groups:
if param_group['lr'] != param_group['after_warmup_lr']:
logger.info('Optimizer has warm-up stage')
return True
def step(self, epoch, score):
adjusted, to_break = False, False
prev_best_score = self.__best_score or -1
is_best = self.__best_score is None or score < self.__best_score - self.eps
self.__best_score = self.__best_score is not None and min(score, self.__best_score) or score
if is_best:
logger.info('Current model is best by val score %.5f < %.5f' % (self.__best_score, prev_best_score))
self.__early_stop_counter = 0
else:
self.__early_stop_counter += 1
if self.__early_stop_counter >= self.early_stop_n:
logger.info('Early stopping, regress for %d iterations', self.__early_stop_counter)
to_break = True
logger.info('early_stop_counter: %d', self.__early_stop_counter)
if (self.warm_up_epochs and self.__descrease_times == 0 and self.__warm_up and epoch >= self.warm_up_epochs - 1 ) or \
(self.__lr_changed <= epoch - self.patience and \
(self.__early_stop_counter is not None and self.patience and self.__early_stop_counter >= self.patience)):
self.__lr_changed = epoch
for param_group in self.optimizer.param_groups:
if self.__descrease_times == 0 and self.__warm_up:
param_group['lr'] = param_group['after_warmup_lr']
else:
param_group['lr'] = param_group['lr'] * self.decrease_rate
logger.info('Setting for group learning rate=%.8f, epoch=%d', param_group['lr'], self.__lr_changed)
adjusted = True
self.__descrease_times += 1
return adjusted, to_break, is_best
def init_optimizer(model, config, exact_layers=None):
"""param 'exact_layers' specifies which parameters of the model to train, None - all,
else - list of layers with a multiplier (optional) for LR schedule"""
opt_type = config.optimizer
if exact_layers:
logger.info('Learning exact layers, number=%d', len(exact_layers))
parameters = []
for i, layer in enumerate(exact_layers):
if isinstance(layer, tuple) and len(layer) == 2:
layer, multiplier = layer
init_multiplier = 1
elif isinstance(layer, tuple) and len(layer) == 3:
layer, init_multiplier, multiplier = layer
else:
multiplier = 1
init_multiplier = 1
lr = config.lr * multiplier
init_lr = config.lr * multiplier * init_multiplier
logger.info('Layer=%d, lr=%.5f', i, init_lr)
parameters.append({'params': layer.parameters(), 'lr': init_lr, 'after_warmup_lr': lr})
else:
logger.info('Optimizing all parameters, lr=%.5f', config.lr)
parameters = model.parameters()
if opt_type == 'sgd':
optimizer = torch.optim.SGD(parameters, config.lr, momentum=config.momentum, weight_decay=config.weight_decay)
elif opt_type == 'adam':
optimizer = torch.optim.Adam(parameters, lr=config.lr, weight_decay=config.weight_decay)
elif opt_type == 'yf':
optimizer = YFOptimizer(parameters, config.lr, mu=config.momentum, weight_decay=config.weight_decay,
clip_thresh=0.1)
else:
raise TypeError, 'Unknown optimizer type=%s' % (opt_type, )
return optimizer
def save_checkpoint(state, epoch, is_best, filename, best_filename):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, best_filename)
shutil.copyfile(filename, best_filename + '-%d' % epoch)
def load_checkpoint(filename):
checkpoint = torch.load(filename)
return checkpoint
def train(train_loader, model, criterion, optimizer, epoch, is_multi_fc=False):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
predictions = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
# compute output
if is_multi_fc==False:
# this is original loss function
output = model(input_var)
loss = criterion(output, target_var)
else:
# this is for inception_v3 with 2 output channels
# https://github.com/pytorch/vision/issues/302
output, output_aux = model(input_var)
loss = criterion(output, target_var)
loss+= criterion(output_aux, target_var)
# measure accuracy and record loss
losses.update(loss.data[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if (i and i % 50 == 0) or i == len(train_loader) - 1:
logger.info('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Accuracy {acc.val:.4f} ({acc.avg:.4f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, acc=predictions))
return losses.avg
def compute_f2(output, target):
true_and_pred = target * output
ttp_sum = torch.sum(true_and_pred, 1)
tpred_sum = torch.sum(output, 1)
ttrue_sum = torch.sum(target, 1)
tprecision = ttp_sum / tpred_sum
trecall = ttp_sum / ttrue_sum
f2 = ((1 + 4) * tprecision * trecall) / (4 * tprecision + trecall)
return f2
def validate(val_loader, model, criterion, activation=torch.sigmoid):
logger.info('Validating model')
batch_time = AverageMeter()
losses = AverageMeter()
f2s = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# compute f2
f2 = compute_f2(activation(output), target_var).mean()
f2s.update(f2.data[0], input.size(0))
# measure accuracy and record loss
losses.update(loss.data[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
logger.info('Test: [{0}/{0}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.avg:.5f}\t'
'F2: {f2s.avg}\t'.format(
len(val_loader), batch_time=batch_time, loss=losses, f2s=f2s))
return losses.avg
def get_outputs(loader, model, activation):
model.eval()
outputs, targets = [], []
for i, (input, target) in enumerate(loader):
input_var = torch.autograd.Variable(input, volatile=True)
output = model(input_var)
if activation is not None:
output = activation(output)
outputs.extend(output.cpu().data)
targets.extend(target)
return outputs, targets
def test_model(test_loader, model, activation=None):
logger.info('Testing')
model.eval()
names, results = [], []
for i, (input, name_batch) in enumerate(test_loader):
input_var = torch.autograd.Variable(input, volatile=True)
output = model(input_var)
if activation is not None:
output = activation(output)
names.extend(name_batch)
results.extend(output.cpu())
if i and i % 20 == 0:
logger.info('Batch %d',i)
return names, results
| __init__ | identifier_name |
hangman.py | # Hangman game
#
# -----------------------------------
# Helper code
# You don't need to understand this helper code,
# but you will have to know how to use the functions
# (so be sure to read the docstrings!)
import random
WORDLIST_FILENAME = "Assignment2\words.txt"
def loadWords():
"""
Returns a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
print("Loading word list from file...")
# inFile: file
inFile = open(WORDLIST_FILENAME, 'r')
# line: string
line = inFile.readline()
# wordlist: list of strings
wordlist = line.split()
print(" ", len(wordlist), "words loaded.")
return wordlist
def chooseWord(wordlist):
"""
wordlist (list): list of words (strings)
Returns a word from wordlist at random
"""
return random.choice(wordlist)
# end of helper code
# -----------------------------------
# Load the list of words into the variable wordlist
# so that it can be accessed from anywhere in the program
wordlist = loadWords()
def isWordGuessed(secretWord, lettersGuessed):
'''
secretWord: string, the word the user is guessing
lettersGuessed: list, what letters have been guessed so far
returns: boolean, True if all the letters of secretWord are in lettersGuessed;
False otherwise
'''
count = 0 # Count method takes a single arguent, element whose count is to be found. Starting at 0 letters.
for i, c in enumerate(secretWord): #enumerate is a built-in function of Python. It allos us to loop over something and have an automatic counter.
# Starting a loop to check if the guessed letter is inside the secretWord. "i" is the index, and "c" is the vlaue.
if c in lettersGuessed:#Another loop to check if the letter is correct. "c" (value) which is a letter in "lettersGuessed"
count += 1 #You add a number if there is a letter in the lettersguessed that is in the secretWord. This will force the loop to start over until the amount of letters in the secretWord.
if count == len(secretWord): #Once the loop is done. If the amount of counts equal the amount of letters in the secretWord
return True #Then you return True because the word was guessed correctly.
else: #if that does not happen
return False #Then you return false because the the person is missing letters to make the correct guess.
# When you've completed your function isWordGuessed, uncomment these three lines
# and run this file to test!
secretWord = 'apple'
lettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']
print(isWordGuessed(secretWord, lettersGuessed))
#You start off with count = 0, a is the first value, a is in lettersGuessed
#so the count becomes 1, then the next letter is p, the count becomes 2, and again p, the count becomes
#3, and then l, since there is no "l" in lettersGuessed, the count remains 3, then the next letter, "e"
#the count becomes 4. One the secretWord letters are done, you move onto if the count equals the secretWord count of letters
#then you get True. However, the count in this case is 4 and the secretWord has 5 letters, so the output is false.
# Expected output:
# False
def getGuessedWord(secretWord, lettersGuessed): | lettersGuessed: list, what letters have been guessed so far
returns: string, comprised of letters and underscores that represents
what letters in secretWord have been guessed so far.
'''
count = 0 #Count method takes a single agument, element whose count is to be found. Starting a 0 letters, and each time it is a correct letter, the count will increase.
blank = ['_ '] * len(secretWord) #An underscore will be multiplied by how my letters in the secretWord.
for i, c in enumerate(secretWord):#Creaing a loop, "i" is the count and "c" is the value or letter in the secretWord.
if c in lettersGuessed:#If letter is in lettersGuesssed
count += 1 #The count will increase by 1 since the letter matches a letter in secretWorld.
blank.insert(count-1,c)#You are inserting the letter on the line of words. The the letter will be placed in the index number.
blank.pop(count) #pop() removees and returns the last item in the lsit. This will remove the underscore.
if count == len(secretWord):#Once all of the letters match the number of letter in the secretworkd
return ''.join(str(t) for t in blank) #then return join() merges the string representations of elements in sequence "e" into a string, with seperator string.
else: #if the letter is not in secretWrod
count += 1 #You add the guess as a count.
blank.insert(count-1,'_') #A blank will be placed in hold of the person not guessing the letter. The count-1, is for the index in the word. If the count is 4, then the placement of the letter is 3: 0,1,2,3.
blank.pop(count)#The pop removes the underscore that you need to get rid of because it created an extra index.
if count == len(secretWord):#Once the guessed letters match up to the amount of letter for secretWord you are done with the loop.
return ''.join(str(t) for t in blank) #this brings it all together.
# # When you've completed your function getGuessedWord, uncomment these three lines
# # and run this file to test!
secretWord = 'apple'
lettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']
print(getGuessedWord(secretWord, lettersGuessed))
#So, you start off with "a", it jumps to the else statement. It counts 1, and inserts a "_" for the first letter of the
#list of "_" because it is index 0. Then "p", count becomes 2 and will go with the first part of the function. It will add a
#"p" in the 1st index, and pop out a blank because a letter was inserted. You do this for every letter.
# # Expected output:
# # '_ pp_ e'
def getAvailableLetters(lettersGuessed):
'''
lettersGuessed: list, what letters have been guessed so far
returns: string, comprised of letters that represents what letters have not
yet been guessed.
'''
alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']# Every letter in the alphebet.
alphabet2 = alphabet[:] #Copy of the alphetbet. There will be letters being removed on this list to show what letters have not been used. Constantly referring back to the first alphebet.
def removeduplicate(L1, L2): #This is a function that has 2 arguments.
L1Start = L1[:]# Have to make sure to make a copy of the first list, to be able to modify the second.
for e in L1:#For an element, or letter in the first argument.
if e in L1Start:#If the leter in LIStart is there, which also implies it is in the first argument
L2.remove(e) #then the letter is removed from the second argyment.
return ''.join(str(e) for e in L2)
return removeduplicate(lettersGuessed, alphabet2) #This will sshow what letters are available.
#Alphabet2 becomes L2. So the letter starts off from the alphabet and as each letter is guessed, it is removed from
#the list and then all the letters left are displayed.
# Hint: You might consider using string.ascii_lowercase, which
# is a string comprised of all lowercase letters.
# # When you've completed your function getAvailableLetters, uncomment these two lines
# # and run this file to test!
# lettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']
# print(getAvailableLetters(lettersGuessed))
# # Expected output:
# # abcdfghjlmnoqtuvwxyz
def hangman(secretWord):
'''
secretWord: string, the secret word to guess.
Starts up an interactive game of Hangman.
* At the start of the game, let the user know how many
letters the secretWord contains.
* Ask the user to supply one guess (i.e. letter) per round.
* The user should receive feedback immediately after each guess
about whether their guess appears in the computers word.
* After each round, you should also display to the user the
partially guessed word so far, as well as letters that the
user has not yet guessed.
Follows the other limitations detailed in the problem write-up.
'''
intro = str(len(secretWord)) #This allows the computer to know how long the secretWord is.
lettersGuessed = [] #This shows all the letters that have been guessed and will be places in here.
guess = str #turns the guess into a string and makes sure it is a string.
mistakesMade = 8 #This is how many attempts the player had. Every time they guess, the number will decrease by 1.
wordGuessed = False #
print('Welcome to the game, Hangman!')
print(('I am thinking of a word that is ') + intro + (' letters long.'))
print('------------')
#These are the instructions for the game.
while mistakesMade > 0 and mistakesMade <= 8 and wordGuessed is False: #Making sure the guesses are within the limit of guessing. You can only get up to 8 guesses for this one.
if secretWord == getGuessedWord(secretWord, lettersGuessed):#If the secretWord is equal to the actual word through this function
wordGuessed = True #then the wordGuessed is True and you are done.
break
print(('You have ') + str(mistakesMade) + (' guesses left.'))
print(('Available letters: ') + getAvailableLetters(lettersGuessed))
#This tells the players how many guesses they have by inserting the number of how many guesses are left.
guess = input(('Please guess a letter: ').lower()) #this allows the user to guess a letter and make it into a lowercase just incase.
if guess in secretWord: #If the guess is in the secretWord it would move to the next two if statements.
if guess in lettersGuessed: #If letter has already been guessed
print(("Oops! You've already guessed that letter: ") + getGuessedWord(secretWord, lettersGuessed))
print(('------------'))
else:
lettersGuessed.append(guess) #You would add the guessed letter into the guess [].
print(('Good guess: ') + getGuessedWord(secretWord, lettersGuessed))
print(('------------'))
else: #If the guess is not in the secretWord it would move to the next if else statements.
if guess in lettersGuessed: #If letter has already been guessed
print(("Oops! You've already guessed that letter: ") + getGuessedWord(secretWord, lettersGuessed))
print(('------------'))
else: #If the guess is not in the secret word, the user will have one less guess.
lettersGuessed.append(guess)
mistakesMade -= 1
print(('Oops! That letter is not in my word: ') + getGuessedWord(secretWord, lettersGuessed))
print(('------------'))
if wordGuessed == True: #If the wordGuessed is equal to the secretWord, the game will tell the user that they won.
return 'Congratulations, you won!'
elif mistakesMade == 0: #If user has guessed incorrectly 8 times, and there are no more guesses left, the game will tell you that they lost and give you the secretWord.
print(('Sorry, you ran out of guesses. The word was ') + secretWord)
# # When you've completed your hangman function, uncomment these two lines
# # and run this file to test! (hint: you might want to pick your own
# # secretWord while you're testing)
secretWord = chooseWord(wordlist).lower() #This will generate a secret word.
print(hangman(secretWord)) #This will call upon the hangman function, randomized secretWrod, and start the game. | '''
secretWord: string, the word the user is guessing | random_line_split |
hangman.py | # Hangman game
#
# -----------------------------------
# Helper code
# You don't need to understand this helper code,
# but you will have to know how to use the functions
# (so be sure to read the docstrings!)
import random
WORDLIST_FILENAME = "Assignment2\words.txt"
def loadWords():
"""
Returns a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
print("Loading word list from file...")
# inFile: file
inFile = open(WORDLIST_FILENAME, 'r')
# line: string
line = inFile.readline()
# wordlist: list of strings
wordlist = line.split()
print(" ", len(wordlist), "words loaded.")
return wordlist
def chooseWord(wordlist):
"""
wordlist (list): list of words (strings)
Returns a word from wordlist at random
"""
return random.choice(wordlist)
# end of helper code
# -----------------------------------
# Load the list of words into the variable wordlist
# so that it can be accessed from anywhere in the program
wordlist = loadWords()
def isWordGuessed(secretWord, lettersGuessed):
'''
secretWord: string, the word the user is guessing
lettersGuessed: list, what letters have been guessed so far
returns: boolean, True if all the letters of secretWord are in lettersGuessed;
False otherwise
'''
count = 0 # Count method takes a single arguent, element whose count is to be found. Starting at 0 letters.
for i, c in enumerate(secretWord): #enumerate is a built-in function of Python. It allos us to loop over something and have an automatic counter.
# Starting a loop to check if the guessed letter is inside the secretWord. "i" is the index, and "c" is the vlaue.
if c in lettersGuessed:#Another loop to check if the letter is correct. "c" (value) which is a letter in "lettersGuessed"
count += 1 #You add a number if there is a letter in the lettersguessed that is in the secretWord. This will force the loop to start over until the amount of letters in the secretWord.
if count == len(secretWord): #Once the loop is done. If the amount of counts equal the amount of letters in the secretWord
return True #Then you return True because the word was guessed correctly.
else: #if that does not happen
return False #Then you return false because the the person is missing letters to make the correct guess.
# When you've completed your function isWordGuessed, uncomment these three lines
# and run this file to test!
secretWord = 'apple'
lettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']
print(isWordGuessed(secretWord, lettersGuessed))
#You start off with count = 0, a is the first value, a is in lettersGuessed
#so the count becomes 1, then the next letter is p, the count becomes 2, and again p, the count becomes
#3, and then l, since there is no "l" in lettersGuessed, the count remains 3, then the next letter, "e"
#the count becomes 4. One the secretWord letters are done, you move onto if the count equals the secretWord count of letters
#then you get True. However, the count in this case is 4 and the secretWord has 5 letters, so the output is false.
# Expected output:
# False
def getGuessedWord(secretWord, lettersGuessed):
'''
secretWord: string, the word the user is guessing
lettersGuessed: list, what letters have been guessed so far
returns: string, comprised of letters and underscores that represents
what letters in secretWord have been guessed so far.
'''
count = 0 #Count method takes a single agument, element whose count is to be found. Starting a 0 letters, and each time it is a correct letter, the count will increase.
blank = ['_ '] * len(secretWord) #An underscore will be multiplied by how my letters in the secretWord.
for i, c in enumerate(secretWord):#Creaing a loop, "i" is the count and "c" is the value or letter in the secretWord.
if c in lettersGuessed:#If letter is in lettersGuesssed
count += 1 #The count will increase by 1 since the letter matches a letter in secretWorld.
blank.insert(count-1,c)#You are inserting the letter on the line of words. The the letter will be placed in the index number.
blank.pop(count) #pop() removees and returns the last item in the lsit. This will remove the underscore.
if count == len(secretWord):#Once all of the letters match the number of letter in the secretworkd
return ''.join(str(t) for t in blank) #then return join() merges the string representations of elements in sequence "e" into a string, with seperator string.
else: #if the letter is not in secretWrod
count += 1 #You add the guess as a count.
blank.insert(count-1,'_') #A blank will be placed in hold of the person not guessing the letter. The count-1, is for the index in the word. If the count is 4, then the placement of the letter is 3: 0,1,2,3.
blank.pop(count)#The pop removes the underscore that you need to get rid of because it created an extra index.
if count == len(secretWord):#Once the guessed letters match up to the amount of letter for secretWord you are done with the loop.
return ''.join(str(t) for t in blank) #this brings it all together.
# # When you've completed your function getGuessedWord, uncomment these three lines
# # and run this file to test!
secretWord = 'apple'
lettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']
print(getGuessedWord(secretWord, lettersGuessed))
#So, you start off with "a", it jumps to the else statement. It counts 1, and inserts a "_" for the first letter of the
#list of "_" because it is index 0. Then "p", count becomes 2 and will go with the first part of the function. It will add a
#"p" in the 1st index, and pop out a blank because a letter was inserted. You do this for every letter.
# # Expected output:
# # '_ pp_ e'
def getAvailableLetters(lettersGuessed):
'''
lettersGuessed: list, what letters have been guessed so far
returns: string, comprised of letters that represents what letters have not
yet been guessed.
'''
alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']# Every letter in the alphebet.
alphabet2 = alphabet[:] #Copy of the alphetbet. There will be letters being removed on this list to show what letters have not been used. Constantly referring back to the first alphebet.
def removeduplicate(L1, L2): #This is a function that has 2 arguments.
|
return removeduplicate(lettersGuessed, alphabet2) #This will sshow what letters are available.
#Alphabet2 becomes L2. So the letter starts off from the alphabet and as each letter is guessed, it is removed from
#the list and then all the letters left are displayed.
# Hint: You might consider using string.ascii_lowercase, which
# is a string comprised of all lowercase letters.
# # When you've completed your function getAvailableLetters, uncomment these two lines
# # and run this file to test!
# lettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']
# print(getAvailableLetters(lettersGuessed))
# # Expected output:
# # abcdfghjlmnoqtuvwxyz
def hangman(secretWord):
'''
secretWord: string, the secret word to guess.
Starts up an interactive game of Hangman.
* At the start of the game, let the user know how many
letters the secretWord contains.
* Ask the user to supply one guess (i.e. letter) per round.
* The user should receive feedback immediately after each guess
about whether their guess appears in the computers word.
* After each round, you should also display to the user the
partially guessed word so far, as well as letters that the
user has not yet guessed.
Follows the other limitations detailed in the problem write-up.
'''
intro = str(len(secretWord)) #This allows the computer to know how long the secretWord is.
lettersGuessed = [] #This shows all the letters that have been guessed and will be places in here.
guess = str #turns the guess into a string and makes sure it is a string.
mistakesMade = 8 #This is how many attempts the player had. Every time they guess, the number will decrease by 1.
wordGuessed = False #
print('Welcome to the game, Hangman!')
print(('I am thinking of a word that is ') + intro + (' letters long.'))
print('------------')
#These are the instructions for the game.
while mistakesMade > 0 and mistakesMade <= 8 and wordGuessed is False: #Making sure the guesses are within the limit of guessing. You can only get up to 8 guesses for this one.
if secretWord == getGuessedWord(secretWord, lettersGuessed):#If the secretWord is equal to the actual word through this function
wordGuessed = True #then the wordGuessed is True and you are done.
break
print(('You have ') + str(mistakesMade) + (' guesses left.'))
print(('Available letters: ') + getAvailableLetters(lettersGuessed))
#This tells the players how many guesses they have by inserting the number of how many guesses are left.
guess = input(('Please guess a letter: ').lower()) #this allows the user to guess a letter and make it into a lowercase just incase.
if guess in secretWord: #If the guess is in the secretWord it would move to the next two if statements.
if guess in lettersGuessed: #If letter has already been guessed
print(("Oops! You've already guessed that letter: ") + getGuessedWord(secretWord, lettersGuessed))
print(('------------'))
else:
lettersGuessed.append(guess) #You would add the guessed letter into the guess [].
print(('Good guess: ') + getGuessedWord(secretWord, lettersGuessed))
print(('------------'))
else: #If the guess is not in the secretWord it would move to the next if else statements.
if guess in lettersGuessed: #If letter has already been guessed
print(("Oops! You've already guessed that letter: ") + getGuessedWord(secretWord, lettersGuessed))
print(('------------'))
else: #If the guess is not in the secret word, the user will have one less guess.
lettersGuessed.append(guess)
mistakesMade -= 1
print(('Oops! That letter is not in my word: ') + getGuessedWord(secretWord, lettersGuessed))
print(('------------'))
if wordGuessed == True: #If the wordGuessed is equal to the secretWord, the game will tell the user that they won.
return 'Congratulations, you won!'
elif mistakesMade == 0: #If user has guessed incorrectly 8 times, and there are no more guesses left, the game will tell you that they lost and give you the secretWord.
print(('Sorry, you ran out of guesses. The word was ') + secretWord)
# # When you've completed your hangman function, uncomment these two lines
# # and run this file to test! (hint: you might want to pick your own
# # secretWord while you're testing)
secretWord = chooseWord(wordlist).lower() #This will generate a secret word.
print(hangman(secretWord)) #This will call upon the hangman function, randomized secretWrod, and start the game. | L1Start = L1[:]# Have to make sure to make a copy of the first list, to be able to modify the second.
for e in L1:#For an element, or letter in the first argument.
if e in L1Start:#If the leter in LIStart is there, which also implies it is in the first argument
L2.remove(e) #then the letter is removed from the second argyment.
return ''.join(str(e) for e in L2) | identifier_body |
hangman.py | # Hangman game
#
# -----------------------------------
# Helper code
# You don't need to understand this helper code,
# but you will have to know how to use the functions
# (so be sure to read the docstrings!)
import random
WORDLIST_FILENAME = "Assignment2\words.txt"
def loadWords():
"""
Returns a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
print("Loading word list from file...")
# inFile: file
inFile = open(WORDLIST_FILENAME, 'r')
# line: string
line = inFile.readline()
# wordlist: list of strings
wordlist = line.split()
print(" ", len(wordlist), "words loaded.")
return wordlist
def chooseWord(wordlist):
"""
wordlist (list): list of words (strings)
Returns a word from wordlist at random
"""
return random.choice(wordlist)
# end of helper code
# -----------------------------------
# Load the list of words into the variable wordlist
# so that it can be accessed from anywhere in the program
wordlist = loadWords()
def isWordGuessed(secretWord, lettersGuessed):
'''
secretWord: string, the word the user is guessing
lettersGuessed: list, what letters have been guessed so far
returns: boolean, True if all the letters of secretWord are in lettersGuessed;
False otherwise
'''
count = 0 # Count method takes a single arguent, element whose count is to be found. Starting at 0 letters.
for i, c in enumerate(secretWord): #enumerate is a built-in function of Python. It allos us to loop over something and have an automatic counter.
# Starting a loop to check if the guessed letter is inside the secretWord. "i" is the index, and "c" is the vlaue.
if c in lettersGuessed:#Another loop to check if the letter is correct. "c" (value) which is a letter in "lettersGuessed"
count += 1 #You add a number if there is a letter in the lettersguessed that is in the secretWord. This will force the loop to start over until the amount of letters in the secretWord.
if count == len(secretWord): #Once the loop is done. If the amount of counts equal the amount of letters in the secretWord
return True #Then you return True because the word was guessed correctly.
else: #if that does not happen
return False #Then you return false because the the person is missing letters to make the correct guess.
# When you've completed your function isWordGuessed, uncomment these three lines
# and run this file to test!
secretWord = 'apple'
lettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']
print(isWordGuessed(secretWord, lettersGuessed))
#You start off with count = 0, a is the first value, a is in lettersGuessed
#so the count becomes 1, then the next letter is p, the count becomes 2, and again p, the count becomes
#3, and then l, since there is no "l" in lettersGuessed, the count remains 3, then the next letter, "e"
#the count becomes 4. One the secretWord letters are done, you move onto if the count equals the secretWord count of letters
#then you get True. However, the count in this case is 4 and the secretWord has 5 letters, so the output is false.
# Expected output:
# False
def getGuessedWord(secretWord, lettersGuessed):
'''
secretWord: string, the word the user is guessing
lettersGuessed: list, what letters have been guessed so far
returns: string, comprised of letters and underscores that represents
what letters in secretWord have been guessed so far.
'''
count = 0 #Count method takes a single agument, element whose count is to be found. Starting a 0 letters, and each time it is a correct letter, the count will increase.
blank = ['_ '] * len(secretWord) #An underscore will be multiplied by how my letters in the secretWord.
for i, c in enumerate(secretWord):#Creaing a loop, "i" is the count and "c" is the value or letter in the secretWord.
if c in lettersGuessed:#If letter is in lettersGuesssed
count += 1 #The count will increase by 1 since the letter matches a letter in secretWorld.
blank.insert(count-1,c)#You are inserting the letter on the line of words. The the letter will be placed in the index number.
blank.pop(count) #pop() removees and returns the last item in the lsit. This will remove the underscore.
if count == len(secretWord):#Once all of the letters match the number of letter in the secretworkd
return ''.join(str(t) for t in blank) #then return join() merges the string representations of elements in sequence "e" into a string, with seperator string.
else: #if the letter is not in secretWrod
count += 1 #You add the guess as a count.
blank.insert(count-1,'_') #A blank will be placed in hold of the person not guessing the letter. The count-1, is for the index in the word. If the count is 4, then the placement of the letter is 3: 0,1,2,3.
blank.pop(count)#The pop removes the underscore that you need to get rid of because it created an extra index.
if count == len(secretWord):#Once the guessed letters match up to the amount of letter for secretWord you are done with the loop.
return ''.join(str(t) for t in blank) #this brings it all together.
# # When you've completed your function getGuessedWord, uncomment these three lines
# # and run this file to test!
secretWord = 'apple'
lettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']
print(getGuessedWord(secretWord, lettersGuessed))
#So, you start off with "a", it jumps to the else statement. It counts 1, and inserts a "_" for the first letter of the
#list of "_" because it is index 0. Then "p", count becomes 2 and will go with the first part of the function. It will add a
#"p" in the 1st index, and pop out a blank because a letter was inserted. You do this for every letter.
# # Expected output:
# # '_ pp_ e'
def getAvailableLetters(lettersGuessed):
'''
lettersGuessed: list, what letters have been guessed so far
returns: string, comprised of letters that represents what letters have not
yet been guessed.
'''
alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']# Every letter in the alphebet.
alphabet2 = alphabet[:] #Copy of the alphetbet. There will be letters being removed on this list to show what letters have not been used. Constantly referring back to the first alphebet.
def removeduplicate(L1, L2): #This is a function that has 2 arguments.
L1Start = L1[:]# Have to make sure to make a copy of the first list, to be able to modify the second.
for e in L1:#For an element, or letter in the first argument.
if e in L1Start:#If the leter in LIStart is there, which also implies it is in the first argument
L2.remove(e) #then the letter is removed from the second argyment.
return ''.join(str(e) for e in L2)
return removeduplicate(lettersGuessed, alphabet2) #This will sshow what letters are available.
#Alphabet2 becomes L2. So the letter starts off from the alphabet and as each letter is guessed, it is removed from
#the list and then all the letters left are displayed.
# Hint: You might consider using string.ascii_lowercase, which
# is a string comprised of all lowercase letters.
# # When you've completed your function getAvailableLetters, uncomment these two lines
# # and run this file to test!
# lettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']
# print(getAvailableLetters(lettersGuessed))
# # Expected output:
# # abcdfghjlmnoqtuvwxyz
def hangman(secretWord):
'''
secretWord: string, the secret word to guess.
Starts up an interactive game of Hangman.
* At the start of the game, let the user know how many
letters the secretWord contains.
* Ask the user to supply one guess (i.e. letter) per round.
* The user should receive feedback immediately after each guess
about whether their guess appears in the computers word.
* After each round, you should also display to the user the
partially guessed word so far, as well as letters that the
user has not yet guessed.
Follows the other limitations detailed in the problem write-up.
'''
intro = str(len(secretWord)) #This allows the computer to know how long the secretWord is.
lettersGuessed = [] #This shows all the letters that have been guessed and will be places in here.
guess = str #turns the guess into a string and makes sure it is a string.
mistakesMade = 8 #This is how many attempts the player had. Every time they guess, the number will decrease by 1.
wordGuessed = False #
print('Welcome to the game, Hangman!')
print(('I am thinking of a word that is ') + intro + (' letters long.'))
print('------------')
#These are the instructions for the game.
while mistakesMade > 0 and mistakesMade <= 8 and wordGuessed is False: #Making sure the guesses are within the limit of guessing. You can only get up to 8 guesses for this one.
if secretWord == getGuessedWord(secretWord, lettersGuessed):#If the secretWord is equal to the actual word through this function
wordGuessed = True #then the wordGuessed is True and you are done.
break
print(('You have ') + str(mistakesMade) + (' guesses left.'))
print(('Available letters: ') + getAvailableLetters(lettersGuessed))
#This tells the players how many guesses they have by inserting the number of how many guesses are left.
guess = input(('Please guess a letter: ').lower()) #this allows the user to guess a letter and make it into a lowercase just incase.
if guess in secretWord: #If the guess is in the secretWord it would move to the next two if statements.
if guess in lettersGuessed: #If letter has already been guessed
|
else:
lettersGuessed.append(guess) #You would add the guessed letter into the guess [].
print(('Good guess: ') + getGuessedWord(secretWord, lettersGuessed))
print(('------------'))
else: #If the guess is not in the secretWord it would move to the next if else statements.
if guess in lettersGuessed: #If letter has already been guessed
print(("Oops! You've already guessed that letter: ") + getGuessedWord(secretWord, lettersGuessed))
print(('------------'))
else: #If the guess is not in the secret word, the user will have one less guess.
lettersGuessed.append(guess)
mistakesMade -= 1
print(('Oops! That letter is not in my word: ') + getGuessedWord(secretWord, lettersGuessed))
print(('------------'))
if wordGuessed == True: #If the wordGuessed is equal to the secretWord, the game will tell the user that they won.
return 'Congratulations, you won!'
elif mistakesMade == 0: #If user has guessed incorrectly 8 times, and there are no more guesses left, the game will tell you that they lost and give you the secretWord.
print(('Sorry, you ran out of guesses. The word was ') + secretWord)
# # When you've completed your hangman function, uncomment these two lines
# # and run this file to test! (hint: you might want to pick your own
# # secretWord while you're testing)
secretWord = chooseWord(wordlist).lower() #This will generate a secret word.
print(hangman(secretWord)) #This will call upon the hangman function, randomized secretWrod, and start the game. | print(("Oops! You've already guessed that letter: ") + getGuessedWord(secretWord, lettersGuessed))
print(('------------')) | conditional_block |
hangman.py | # Hangman game
#
# -----------------------------------
# Helper code
# You don't need to understand this helper code,
# but you will have to know how to use the functions
# (so be sure to read the docstrings!)
import random
WORDLIST_FILENAME = "Assignment2\words.txt"
def | ():
"""
Returns a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
print("Loading word list from file...")
# inFile: file
inFile = open(WORDLIST_FILENAME, 'r')
# line: string
line = inFile.readline()
# wordlist: list of strings
wordlist = line.split()
print(" ", len(wordlist), "words loaded.")
return wordlist
def chooseWord(wordlist):
"""
wordlist (list): list of words (strings)
Returns a word from wordlist at random
"""
return random.choice(wordlist)
# end of helper code
# -----------------------------------
# Load the list of words into the variable wordlist
# so that it can be accessed from anywhere in the program
wordlist = loadWords()
def isWordGuessed(secretWord, lettersGuessed):
'''
secretWord: string, the word the user is guessing
lettersGuessed: list, what letters have been guessed so far
returns: boolean, True if all the letters of secretWord are in lettersGuessed;
False otherwise
'''
count = 0 # Count method takes a single arguent, element whose count is to be found. Starting at 0 letters.
for i, c in enumerate(secretWord): #enumerate is a built-in function of Python. It allos us to loop over something and have an automatic counter.
# Starting a loop to check if the guessed letter is inside the secretWord. "i" is the index, and "c" is the vlaue.
if c in lettersGuessed:#Another loop to check if the letter is correct. "c" (value) which is a letter in "lettersGuessed"
count += 1 #You add a number if there is a letter in the lettersguessed that is in the secretWord. This will force the loop to start over until the amount of letters in the secretWord.
if count == len(secretWord): #Once the loop is done. If the amount of counts equal the amount of letters in the secretWord
return True #Then you return True because the word was guessed correctly.
else: #if that does not happen
return False #Then you return false because the the person is missing letters to make the correct guess.
# When you've completed your function isWordGuessed, uncomment these three lines
# and run this file to test!
secretWord = 'apple'
lettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']
print(isWordGuessed(secretWord, lettersGuessed))
#You start off with count = 0, a is the first value, a is in lettersGuessed
#so the count becomes 1, then the next letter is p, the count becomes 2, and again p, the count becomes
#3, and then l, since there is no "l" in lettersGuessed, the count remains 3, then the next letter, "e"
#the count becomes 4. One the secretWord letters are done, you move onto if the count equals the secretWord count of letters
#then you get True. However, the count in this case is 4 and the secretWord has 5 letters, so the output is false.
# Expected output:
# False
def getGuessedWord(secretWord, lettersGuessed):
'''
secretWord: string, the word the user is guessing
lettersGuessed: list, what letters have been guessed so far
returns: string, comprised of letters and underscores that represents
what letters in secretWord have been guessed so far.
'''
count = 0 #Count method takes a single agument, element whose count is to be found. Starting a 0 letters, and each time it is a correct letter, the count will increase.
blank = ['_ '] * len(secretWord) #An underscore will be multiplied by how my letters in the secretWord.
for i, c in enumerate(secretWord):#Creaing a loop, "i" is the count and "c" is the value or letter in the secretWord.
if c in lettersGuessed:#If letter is in lettersGuesssed
count += 1 #The count will increase by 1 since the letter matches a letter in secretWorld.
blank.insert(count-1,c)#You are inserting the letter on the line of words. The the letter will be placed in the index number.
blank.pop(count) #pop() removees and returns the last item in the lsit. This will remove the underscore.
if count == len(secretWord):#Once all of the letters match the number of letter in the secretworkd
return ''.join(str(t) for t in blank) #then return join() merges the string representations of elements in sequence "e" into a string, with seperator string.
else: #if the letter is not in secretWrod
count += 1 #You add the guess as a count.
blank.insert(count-1,'_') #A blank will be placed in hold of the person not guessing the letter. The count-1, is for the index in the word. If the count is 4, then the placement of the letter is 3: 0,1,2,3.
blank.pop(count)#The pop removes the underscore that you need to get rid of because it created an extra index.
if count == len(secretWord):#Once the guessed letters match up to the amount of letter for secretWord you are done with the loop.
return ''.join(str(t) for t in blank) #this brings it all together.
# # When you've completed your function getGuessedWord, uncomment these three lines
# # and run this file to test!
secretWord = 'apple'
lettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']
print(getGuessedWord(secretWord, lettersGuessed))
#So, you start off with "a", it jumps to the else statement. It counts 1, and inserts a "_" for the first letter of the
#list of "_" because it is index 0. Then "p", count becomes 2 and will go with the first part of the function. It will add a
#"p" in the 1st index, and pop out a blank because a letter was inserted. You do this for every letter.
# # Expected output:
# # '_ pp_ e'
def getAvailableLetters(lettersGuessed):
'''
lettersGuessed: list, what letters have been guessed so far
returns: string, comprised of letters that represents what letters have not
yet been guessed.
'''
alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']# Every letter in the alphebet.
alphabet2 = alphabet[:] #Copy of the alphetbet. There will be letters being removed on this list to show what letters have not been used. Constantly referring back to the first alphebet.
def removeduplicate(L1, L2): #This is a function that has 2 arguments.
L1Start = L1[:]# Have to make sure to make a copy of the first list, to be able to modify the second.
for e in L1:#For an element, or letter in the first argument.
if e in L1Start:#If the leter in LIStart is there, which also implies it is in the first argument
L2.remove(e) #then the letter is removed from the second argyment.
return ''.join(str(e) for e in L2)
return removeduplicate(lettersGuessed, alphabet2) #This will sshow what letters are available.
#Alphabet2 becomes L2. So the letter starts off from the alphabet and as each letter is guessed, it is removed from
#the list and then all the letters left are displayed.
# Hint: You might consider using string.ascii_lowercase, which
# is a string comprised of all lowercase letters.
# # When you've completed your function getAvailableLetters, uncomment these two lines
# # and run this file to test!
# lettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']
# print(getAvailableLetters(lettersGuessed))
# # Expected output:
# # abcdfghjlmnoqtuvwxyz
def hangman(secretWord):
'''
secretWord: string, the secret word to guess.
Starts up an interactive game of Hangman.
* At the start of the game, let the user know how many
letters the secretWord contains.
* Ask the user to supply one guess (i.e. letter) per round.
* The user should receive feedback immediately after each guess
about whether their guess appears in the computers word.
* After each round, you should also display to the user the
partially guessed word so far, as well as letters that the
user has not yet guessed.
Follows the other limitations detailed in the problem write-up.
'''
intro = str(len(secretWord)) #This allows the computer to know how long the secretWord is.
lettersGuessed = [] #This shows all the letters that have been guessed and will be places in here.
guess = str #turns the guess into a string and makes sure it is a string.
mistakesMade = 8 #This is how many attempts the player had. Every time they guess, the number will decrease by 1.
wordGuessed = False #
print('Welcome to the game, Hangman!')
print(('I am thinking of a word that is ') + intro + (' letters long.'))
print('------------')
#These are the instructions for the game.
while mistakesMade > 0 and mistakesMade <= 8 and wordGuessed is False: #Making sure the guesses are within the limit of guessing. You can only get up to 8 guesses for this one.
if secretWord == getGuessedWord(secretWord, lettersGuessed):#If the secretWord is equal to the actual word through this function
wordGuessed = True #then the wordGuessed is True and you are done.
break
print(('You have ') + str(mistakesMade) + (' guesses left.'))
print(('Available letters: ') + getAvailableLetters(lettersGuessed))
#This tells the players how many guesses they have by inserting the number of how many guesses are left.
guess = input(('Please guess a letter: ').lower()) #this allows the user to guess a letter and make it into a lowercase just incase.
if guess in secretWord: #If the guess is in the secretWord it would move to the next two if statements.
if guess in lettersGuessed: #If letter has already been guessed
print(("Oops! You've already guessed that letter: ") + getGuessedWord(secretWord, lettersGuessed))
print(('------------'))
else:
lettersGuessed.append(guess) #You would add the guessed letter into the guess [].
print(('Good guess: ') + getGuessedWord(secretWord, lettersGuessed))
print(('------------'))
else: #If the guess is not in the secretWord it would move to the next if else statements.
if guess in lettersGuessed: #If letter has already been guessed
print(("Oops! You've already guessed that letter: ") + getGuessedWord(secretWord, lettersGuessed))
print(('------------'))
else: #If the guess is not in the secret word, the user will have one less guess.
lettersGuessed.append(guess)
mistakesMade -= 1
print(('Oops! That letter is not in my word: ') + getGuessedWord(secretWord, lettersGuessed))
print(('------------'))
if wordGuessed == True: #If the wordGuessed is equal to the secretWord, the game will tell the user that they won.
return 'Congratulations, you won!'
elif mistakesMade == 0: #If user has guessed incorrectly 8 times, and there are no more guesses left, the game will tell you that they lost and give you the secretWord.
print(('Sorry, you ran out of guesses. The word was ') + secretWord)
# # When you've completed your hangman function, uncomment these two lines
# # and run this file to test! (hint: you might want to pick your own
# # secretWord while you're testing)
secretWord = chooseWord(wordlist).lower() #This will generate a secret word.
print(hangman(secretWord)) #This will call upon the hangman function, randomized secretWrod, and start the game. | loadWords | identifier_name |
mn-map.component.ts | /// <reference path="../../../typings/index.d.ts" />
import { ViewChild, ContentChildren,OnInit, Inject, forwardRef, Component,Directive, AfterViewInit, Input, Output, EventEmitter, QueryList, ElementRef } from '@angular/core';
import { Http, Response } from '@angular/http';
import { BackendManagerService } from '../backend-manager.service'
import 'rxjs/add/operator/toPromise';
/**
* Prepresents the generic layer
*/
export interface LeafLayer{
getLayer():L.Layer|Promise<L.Layer>;
addToMap(m, bls, dls);
getName():string;
isBase():boolean;
}
export abstract class LeafLayerBase implements LeafLayer{
abstract getLayer():L.Layer|Promise<L.Layer>;
abstract isBase():boolean;
protected name:string;
getName():string{
return this.name;
}
addToMap(m, bls, dls){
let l = this.getLayer();
m.addLayer(l);
if(this.isBase())
bls[this.getName()] = l;
else
dls[this.getName()] = l;
}
}
/**
* Marker for Marker Layer
* @param lon: Longitude of the marker
*/
@Directive({
selector: '[marker]',
})
export class Marker{
@Input() lon:number;
@Input() lat:number;
@Input() icon:string;
@Input() color:string;
@Input() size:string;
@Input() data:any;
@Input() set geo_data(value){
if (value){
this.data = value;
this.parent.redraw();
}
}
@Output() datachange = new EventEmitter<any>();
constructor(@Inject(forwardRef(() => MarkerLayer)) private parent:MarkerLayer){}
addMarker(lyr){
let m = this.get_marker();
if (m != null){
lyr.addLayer(m);
m.openPopup();
}
}
get_marker(){
if (this.data == null){
if (this.lat !== undefined)
return L.marker([this.lat, this.lon]);
else return null;
} else {
if (this.data.geometry) {
if (this.data.geometry.coordinates[0] != 0) {
let pop = "<div><h3>"+this.data.properties.RagioneSociale+"</h3><p>"+this.data.properties.Indirizzo+", "+this.data.properties.Frazione + " "+this.data.properties.Comune+"</p></div>";
return L.marker(this.data.geometry.coordinates).bindPopup(pop).openPopup();
}
}
}
}
}
/**
* Marker Layer
* @param lon: Longitude of the marker
*/
@Directive({
selector: '[markers]',
})
export class MarkerLayer extends LeafLayerBase{
@Input() name:string;
@ContentChildren(Marker) dataLayers: QueryList<Marker>;
layer;
getLayer(){
this.layer = L.featureGroup();
this.redraw();
return this.layer;
}
redraw(){
this.layer.clearLayers();
this.dataLayers.forEach(element => {
element.addMarker(this.layer);
});
}
isBase(){
return false;
}
}
/**
* Tile Layer
* @param lon: Longitude of the marker
*/
@Directive({
selector: 'mapboxlayer',
})
export class MapboxLayer extends LeafLayerBase{
@Input() name:string;
@Input() owner:string;
@Input() id:string;
@Input() token:string;
@Input() minzoom:number = 1;
@Input() maxzoom:number = 20;
getLayer(){
let url = "https://api.mapbox.com/styles/v1/"+this.owner+"/"+this.id+"/tiles/256/{z}/{x}/{y}?access_token="+this.token;
console.log(url);
let attribution = "";
return L.tileLayer(url, {minZoom: this.minzoom, maxZoom: this.maxzoom, attribution: attribution});
}
isBase(){
return true;
}
}
/**
* Tile Layer
* @param lon: Longitude of the marker
*/
@Directive({
selector: 'tile_layer',
})
export class BaseLayer extends LeafLayerBase{
@Input() name:string;
@Input() url:string;
@Input() attribution:string;
@Input() minzoom:number = 1;
@Input() maxzoom:number = 20;
getLayer(){
return L.tileLayer(this.url, {minZoom: this.minzoom, maxZoom: this.maxzoom, attribution: this.attribution});
}
isBase(){
return true;
}
}
/**
* Standard Tile Layer
* @param name: one of "osm", "bing", "google", ""
*/
@Directive({
selector: 'namedlayer',
})
export class NamedLayer extends LeafLayerBase {
@Input() layer:string;
configs = {
osms:{name:"OpenStreetMap", url:"https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png", attribution:"Map data © <a href=\"http://openstreetmap.org\">OpenStreetMap</a> contributors", minzoom:1, maxzoom:19},
osm:{name:"OpenStreetMap", url:"http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png", attribution:"Map data © <a href=\"http://openstreetmap.org\">OpenStreetMap</a> contributors", minzoom:1, maxzoom:19},
positron:{name:"Carto Positron", url:"http://{s}.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png", attribution:'© <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors, © <a href="https://carto.com/attributions">CARTO</a>', minzoom:1, maxzoom:19},
darkmatter:{name:"Carto Positron", url:"http://{s}.basemaps.cartocdn.com/dark_all/{z}/{x}/{y}.png", attribution:'© <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors, © <a href="https://carto.com/attributions">CARTO</a>', minzoom:1, maxzoom:19},
};
getLayer(){
if(Object.keys(this.configs).indexOf(this.layer) >= 0){
let lyr = this.configs[this.layer];
return L.tileLayer(lyr.url, {minZoom: lyr.minzoom, maxZoom: lyr.maxzoom, attribution: lyr.attribution});
}
return null;
}
isBase(){
return true;
}
getName(){
if(this.layer in this.configs){
return this.configs[this.layer].name;
}
return "";
}
}
@Directive({
selector: 'datalayer',
})
export class DataLayer extends LeafLayerBase {
@Input() type:string;
@Input() mode:string;
@Input() src:string;
@Input() aggregator:string;
@Input() field:string;
@Input() basestyle:any={};
@Input() propertystyle:any={};
@Input() styledproperty:string;
@Output() areaclick = new EventEmitter<any>();
constructor(private http:Http){
super();
}
the_style(basestyle, styledproperty, propertystyle){
return function(feature){
let gstyle = basestyle;
let v = feature.properties[styledproperty];
let astyle = propertystyle[v];
Object.assign(gstyle, astyle);
return gstyle;
}
}
getLayer():Promise<L.Layer>{
if (this.type == "geojson")
return new Promise<L.Layer>((resolve, react) =>{
this.http.get(this.aggregator).toPromise().then(x=>{
console.log(x);
resolve(L.geoJSON(x.json(), {
style:this.the_style(this.basestyle, this.styledproperty, this.propertystyle),
onEachFeature:(feature, lyr) => {
lyr.on({
click:(e)=>{
this.areaclick.emit({
field:feature.properties[this.field],
feature:feature
});
}
});
}
}));
});
});
return null;
}
isBase(){
return false;
}
addToMap(m, bls, dls){
this.getLayer().then(x=>{
m.addLayer(x);
dls.push(x);
});
}
}
/**
* Tile Layer
* @param lon: Longitude of the marker
*/
@Directive({
selector: 'cityosbglayer',
})
export class CityOSBackgroundLayer extends LeafLayerBase{
@Input() conf:any;
name:string;
url:string;
attribution:string;
minzoom:number = 1;
maxzoom:number = 20;
ngOnInit(){
this.name = this.conf.name;
this.url = this.conf.url;
this.attribution = this.conf.attribution;
}
getLayer(){
return L.tileLayer(this.url, {minZoom: this.minzoom, maxZoom: this.maxzoom, attribution: this.attribution});
}
isBase(){
return true;
}
}
@Directive({
selector: 'cityoslayer',
})
export class CityOSLayer extends LeafLayerBase {
@Input() mappingSpace:number;
@Output() itemclick = new EventEmitter<any>();
items;
styles;
int_styles = {}
constructor(private bms:BackendManagerService){
|
getLayer():Promise<L.Layer>{
return new Promise<L.Layer>((resolve, react) =>{
this.bms.setPaging(false).setActiveApp("spaces/"+this.mappingSpace+"/styles").getAll().then(s=>{
this.styles = s;
for(let style of this.styles){
this.int_styles[style.slug] = style;
}
console.log(this.int_styles);
this.bms.setPaging(false).setActiveApp("spaces/"+this.mappingSpace+"/geolocations").getAll().then(x=>{
console.log(x);
let geoj = L.geoJSON(x , {
style:(feature =>{
return this.int_styles[feature.properties.types[0]];
}),
});
resolve( geoj );
});
});
});
}
isBase(){
return false;
}
addToMap(m, bls, dls){
this.getLayer().then(x=>{
m.addLayer(x);
dls["CityOS"] = x;
});
}
}
@Component({
selector: '[mn-map]',
templateUrl: './mn-map.component.html',
styleUrls: ['./mn-map.component.css'],
})
export class MnMapComponent {
private makeid() {
var text = "";
var possible = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
for( var i=0; i < 5; i++ )
text += possible.charAt(Math.floor(Math.random() * possible.length));
return text;
}
@Input() conf:any;
@Input() map_id:string;
@Input() center:number[] = [51.505,-0.09];
@Input() minzoom:number = 0;
@Input() maxzoom:number = 20;
@Input() startzoom:number = 13;
@Input() controls = true;
@Input() scrollZoom = false;
@Input() zoomControl = true;
@ContentChildren(BaseLayer) baseLayers: QueryList<LeafLayer>;
@ContentChildren(NamedLayer) namedLayers: QueryList<LeafLayer>;
@ContentChildren(DataLayer) dataLayers: QueryList<LeafLayer>;
@ContentChildren(MarkerLayer) markerLayers: QueryList<LeafLayer>;
@ContentChildren(CityOSLayer) cityoslayer: QueryList<LeafLayer>;
@ContentChildren(CityOSBackgroundLayer) cityosbglayer: QueryList<LeafLayer>;
@ContentChildren(MapboxLayer) mapboxLayers: QueryList<LeafLayer>;
@Output() click:EventEmitter<any> = new EventEmitter();
@Output() movestart:EventEmitter<any> = new EventEmitter();
@Output() moveend:EventEmitter<any> = new EventEmitter();
public map;
layers:Array<LeafLayer> = [];
private addLayer(layer:LeafLayer){
this.layers.push(layer);
}
grid_unit:number = 170;
grid_gutter:number = 15;
constructor(private elementRef: ElementRef){
if(this.map_id == null)
this.map_id = this.makeid();
}
protected prepareLayers(){
this.baseLayers.forEach(element => {
this.addLayer(element);
});
this.namedLayers.forEach(element => {
this.addLayer(element);
});
this.dataLayers.forEach(element => {
this.addLayer(element);
});
this.markerLayers.forEach(element => {
this.addLayer(element);
});
this.mapboxLayers.forEach(element => {
this.addLayer(element);
});
this.cityoslayer.forEach(element => {
this.addLayer(element);
});
this.cityosbglayer.forEach(element => {
this.addLayer(element);
});
}
ngAfterViewInit() {
try{
this.map = L.map(this.map_id, {
minZoom:this.minzoom,
maxZoom:this.maxzoom,
scrollWheelZoom:this.scrollZoom,
zoomControl: this.zoomControl
}).setView([this.center[0], this.center[1]], this.startzoom);
this.prepareLayers();
let bls = {};
let dls = {};
for(let lyr of this.layers){
lyr.addToMap(this.map, bls, dls);
}
this.map._onResize();
if(this.controls)
L.control.layers(bls, dls).addTo(this.map);
} catch (ex){
console.log(ex);
}
}
}
| super();
}
| identifier_body |
mn-map.component.ts | /// <reference path="../../../typings/index.d.ts" />
import { ViewChild, ContentChildren,OnInit, Inject, forwardRef, Component,Directive, AfterViewInit, Input, Output, EventEmitter, QueryList, ElementRef } from '@angular/core';
import { Http, Response } from '@angular/http';
import { BackendManagerService } from '../backend-manager.service'
import 'rxjs/add/operator/toPromise';
/**
* Prepresents the generic layer
*/
export interface LeafLayer{
getLayer():L.Layer|Promise<L.Layer>;
addToMap(m, bls, dls);
getName():string;
isBase():boolean;
} | export abstract class LeafLayerBase implements LeafLayer{
abstract getLayer():L.Layer|Promise<L.Layer>;
abstract isBase():boolean;
protected name:string;
getName():string{
return this.name;
}
addToMap(m, bls, dls){
let l = this.getLayer();
m.addLayer(l);
if(this.isBase())
bls[this.getName()] = l;
else
dls[this.getName()] = l;
}
}
/**
* Marker for Marker Layer
* @param lon: Longitude of the marker
*/
@Directive({
selector: '[marker]',
})
export class Marker{
@Input() lon:number;
@Input() lat:number;
@Input() icon:string;
@Input() color:string;
@Input() size:string;
@Input() data:any;
@Input() set geo_data(value){
if (value){
this.data = value;
this.parent.redraw();
}
}
@Output() datachange = new EventEmitter<any>();
constructor(@Inject(forwardRef(() => MarkerLayer)) private parent:MarkerLayer){}
addMarker(lyr){
let m = this.get_marker();
if (m != null){
lyr.addLayer(m);
m.openPopup();
}
}
get_marker(){
if (this.data == null){
if (this.lat !== undefined)
return L.marker([this.lat, this.lon]);
else return null;
} else {
if (this.data.geometry) {
if (this.data.geometry.coordinates[0] != 0) {
let pop = "<div><h3>"+this.data.properties.RagioneSociale+"</h3><p>"+this.data.properties.Indirizzo+", "+this.data.properties.Frazione + " "+this.data.properties.Comune+"</p></div>";
return L.marker(this.data.geometry.coordinates).bindPopup(pop).openPopup();
}
}
}
}
}
/**
* Marker Layer
* @param lon: Longitude of the marker
*/
@Directive({
selector: '[markers]',
})
export class MarkerLayer extends LeafLayerBase{
@Input() name:string;
@ContentChildren(Marker) dataLayers: QueryList<Marker>;
layer;
getLayer(){
this.layer = L.featureGroup();
this.redraw();
return this.layer;
}
redraw(){
this.layer.clearLayers();
this.dataLayers.forEach(element => {
element.addMarker(this.layer);
});
}
isBase(){
return false;
}
}
/**
* Tile Layer
* @param lon: Longitude of the marker
*/
@Directive({
selector: 'mapboxlayer',
})
export class MapboxLayer extends LeafLayerBase{
@Input() name:string;
@Input() owner:string;
@Input() id:string;
@Input() token:string;
@Input() minzoom:number = 1;
@Input() maxzoom:number = 20;
getLayer(){
let url = "https://api.mapbox.com/styles/v1/"+this.owner+"/"+this.id+"/tiles/256/{z}/{x}/{y}?access_token="+this.token;
console.log(url);
let attribution = "";
return L.tileLayer(url, {minZoom: this.minzoom, maxZoom: this.maxzoom, attribution: attribution});
}
isBase(){
return true;
}
}
/**
* Tile Layer
* @param lon: Longitude of the marker
*/
@Directive({
selector: 'tile_layer',
})
export class BaseLayer extends LeafLayerBase{
@Input() name:string;
@Input() url:string;
@Input() attribution:string;
@Input() minzoom:number = 1;
@Input() maxzoom:number = 20;
getLayer(){
return L.tileLayer(this.url, {minZoom: this.minzoom, maxZoom: this.maxzoom, attribution: this.attribution});
}
isBase(){
return true;
}
}
/**
* Standard Tile Layer
* @param name: one of "osm", "bing", "google", ""
*/
@Directive({
selector: 'namedlayer',
})
export class NamedLayer extends LeafLayerBase {
@Input() layer:string;
configs = {
osms:{name:"OpenStreetMap", url:"https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png", attribution:"Map data © <a href=\"http://openstreetmap.org\">OpenStreetMap</a> contributors", minzoom:1, maxzoom:19},
osm:{name:"OpenStreetMap", url:"http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png", attribution:"Map data © <a href=\"http://openstreetmap.org\">OpenStreetMap</a> contributors", minzoom:1, maxzoom:19},
positron:{name:"Carto Positron", url:"http://{s}.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png", attribution:'© <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors, © <a href="https://carto.com/attributions">CARTO</a>', minzoom:1, maxzoom:19},
darkmatter:{name:"Carto Positron", url:"http://{s}.basemaps.cartocdn.com/dark_all/{z}/{x}/{y}.png", attribution:'© <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors, © <a href="https://carto.com/attributions">CARTO</a>', minzoom:1, maxzoom:19},
};
getLayer(){
if(Object.keys(this.configs).indexOf(this.layer) >= 0){
let lyr = this.configs[this.layer];
return L.tileLayer(lyr.url, {minZoom: lyr.minzoom, maxZoom: lyr.maxzoom, attribution: lyr.attribution});
}
return null;
}
isBase(){
return true;
}
getName(){
if(this.layer in this.configs){
return this.configs[this.layer].name;
}
return "";
}
}
@Directive({
selector: 'datalayer',
})
export class DataLayer extends LeafLayerBase {
@Input() type:string;
@Input() mode:string;
@Input() src:string;
@Input() aggregator:string;
@Input() field:string;
@Input() basestyle:any={};
@Input() propertystyle:any={};
@Input() styledproperty:string;
@Output() areaclick = new EventEmitter<any>();
constructor(private http:Http){
super();
}
the_style(basestyle, styledproperty, propertystyle){
return function(feature){
let gstyle = basestyle;
let v = feature.properties[styledproperty];
let astyle = propertystyle[v];
Object.assign(gstyle, astyle);
return gstyle;
}
}
getLayer():Promise<L.Layer>{
if (this.type == "geojson")
return new Promise<L.Layer>((resolve, react) =>{
this.http.get(this.aggregator).toPromise().then(x=>{
console.log(x);
resolve(L.geoJSON(x.json(), {
style:this.the_style(this.basestyle, this.styledproperty, this.propertystyle),
onEachFeature:(feature, lyr) => {
lyr.on({
click:(e)=>{
this.areaclick.emit({
field:feature.properties[this.field],
feature:feature
});
}
});
}
}));
});
});
return null;
}
isBase(){
return false;
}
addToMap(m, bls, dls){
this.getLayer().then(x=>{
m.addLayer(x);
dls.push(x);
});
}
}
/**
* Tile Layer
* @param lon: Longitude of the marker
*/
@Directive({
selector: 'cityosbglayer',
})
export class CityOSBackgroundLayer extends LeafLayerBase{
@Input() conf:any;
name:string;
url:string;
attribution:string;
minzoom:number = 1;
maxzoom:number = 20;
ngOnInit(){
this.name = this.conf.name;
this.url = this.conf.url;
this.attribution = this.conf.attribution;
}
getLayer(){
return L.tileLayer(this.url, {minZoom: this.minzoom, maxZoom: this.maxzoom, attribution: this.attribution});
}
isBase(){
return true;
}
}
@Directive({
selector: 'cityoslayer',
})
export class CityOSLayer extends LeafLayerBase {
@Input() mappingSpace:number;
@Output() itemclick = new EventEmitter<any>();
items;
styles;
int_styles = {}
constructor(private bms:BackendManagerService){
super();
}
getLayer():Promise<L.Layer>{
return new Promise<L.Layer>((resolve, react) =>{
this.bms.setPaging(false).setActiveApp("spaces/"+this.mappingSpace+"/styles").getAll().then(s=>{
this.styles = s;
for(let style of this.styles){
this.int_styles[style.slug] = style;
}
console.log(this.int_styles);
this.bms.setPaging(false).setActiveApp("spaces/"+this.mappingSpace+"/geolocations").getAll().then(x=>{
console.log(x);
let geoj = L.geoJSON(x , {
style:(feature =>{
return this.int_styles[feature.properties.types[0]];
}),
});
resolve( geoj );
});
});
});
}
isBase(){
return false;
}
addToMap(m, bls, dls){
this.getLayer().then(x=>{
m.addLayer(x);
dls["CityOS"] = x;
});
}
}
@Component({
selector: '[mn-map]',
templateUrl: './mn-map.component.html',
styleUrls: ['./mn-map.component.css'],
})
export class MnMapComponent {
private makeid() {
var text = "";
var possible = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
for( var i=0; i < 5; i++ )
text += possible.charAt(Math.floor(Math.random() * possible.length));
return text;
}
@Input() conf:any;
@Input() map_id:string;
@Input() center:number[] = [51.505,-0.09];
@Input() minzoom:number = 0;
@Input() maxzoom:number = 20;
@Input() startzoom:number = 13;
@Input() controls = true;
@Input() scrollZoom = false;
@Input() zoomControl = true;
@ContentChildren(BaseLayer) baseLayers: QueryList<LeafLayer>;
@ContentChildren(NamedLayer) namedLayers: QueryList<LeafLayer>;
@ContentChildren(DataLayer) dataLayers: QueryList<LeafLayer>;
@ContentChildren(MarkerLayer) markerLayers: QueryList<LeafLayer>;
@ContentChildren(CityOSLayer) cityoslayer: QueryList<LeafLayer>;
@ContentChildren(CityOSBackgroundLayer) cityosbglayer: QueryList<LeafLayer>;
@ContentChildren(MapboxLayer) mapboxLayers: QueryList<LeafLayer>;
@Output() click:EventEmitter<any> = new EventEmitter();
@Output() movestart:EventEmitter<any> = new EventEmitter();
@Output() moveend:EventEmitter<any> = new EventEmitter();
public map;
layers:Array<LeafLayer> = [];
private addLayer(layer:LeafLayer){
this.layers.push(layer);
}
grid_unit:number = 170;
grid_gutter:number = 15;
constructor(private elementRef: ElementRef){
if(this.map_id == null)
this.map_id = this.makeid();
}
protected prepareLayers(){
this.baseLayers.forEach(element => {
this.addLayer(element);
});
this.namedLayers.forEach(element => {
this.addLayer(element);
});
this.dataLayers.forEach(element => {
this.addLayer(element);
});
this.markerLayers.forEach(element => {
this.addLayer(element);
});
this.mapboxLayers.forEach(element => {
this.addLayer(element);
});
this.cityoslayer.forEach(element => {
this.addLayer(element);
});
this.cityosbglayer.forEach(element => {
this.addLayer(element);
});
}
ngAfterViewInit() {
try{
this.map = L.map(this.map_id, {
minZoom:this.minzoom,
maxZoom:this.maxzoom,
scrollWheelZoom:this.scrollZoom,
zoomControl: this.zoomControl
}).setView([this.center[0], this.center[1]], this.startzoom);
this.prepareLayers();
let bls = {};
let dls = {};
for(let lyr of this.layers){
lyr.addToMap(this.map, bls, dls);
}
this.map._onResize();
if(this.controls)
L.control.layers(bls, dls).addTo(this.map);
} catch (ex){
console.log(ex);
}
}
} | random_line_split | |
mn-map.component.ts | /// <reference path="../../../typings/index.d.ts" />
import { ViewChild, ContentChildren,OnInit, Inject, forwardRef, Component,Directive, AfterViewInit, Input, Output, EventEmitter, QueryList, ElementRef } from '@angular/core';
import { Http, Response } from '@angular/http';
import { BackendManagerService } from '../backend-manager.service'
import 'rxjs/add/operator/toPromise';
/**
* Prepresents the generic layer
*/
export interface LeafLayer{
getLayer():L.Layer|Promise<L.Layer>;
addToMap(m, bls, dls);
getName():string;
isBase():boolean;
}
export abstract class LeafLayerBase implements LeafLayer{
abstract getLayer():L.Layer|Promise<L.Layer>;
abstract isBase():boolean;
protected name:string;
getName():string{
return this.name;
}
addToMap(m, bls, dls){
let l = this.getLayer();
m.addLayer(l);
if(this.isBase())
bls[this.getName()] = l;
else
dls[this.getName()] = l;
}
}
/**
* Marker for Marker Layer
* @param lon: Longitude of the marker
*/
@Directive({
selector: '[marker]',
})
export class Marker{
@Input() lon:number;
@Input() lat:number;
@Input() icon:string;
@Input() color:string;
@Input() size:string;
@Input() data:any;
@Input() set geo_data(value){
if (value){
this.data = value;
this.parent.redraw();
}
}
@Output() datachange = new EventEmitter<any>();
constructor(@Inject(forwardRef(() => MarkerLayer)) private parent:MarkerLayer){}
addMarker(lyr){
let m = this.get_marker();
if (m != null){
lyr.addLayer(m);
m.openPopup();
}
}
get_marker(){
if (this.data == null){
if (this.lat !== undefined)
return L.marker([this.lat, this.lon]);
else return null;
} else {
if (this.data.geometry) {
if (this.data.geometry.coordinates[0] != 0) {
let pop = "<div><h3>"+this.data.properties.RagioneSociale+"</h3><p>"+this.data.properties.Indirizzo+", "+this.data.properties.Frazione + " "+this.data.properties.Comune+"</p></div>";
return L.marker(this.data.geometry.coordinates).bindPopup(pop).openPopup();
}
}
}
}
}
/**
* Marker Layer
* @param lon: Longitude of the marker
*/
@Directive({
selector: '[markers]',
})
export class MarkerLayer extends LeafLayerBase{
@Input() name:string;
@ContentChildren(Marker) dataLayers: QueryList<Marker>;
layer;
getLayer(){
this.layer = L.featureGroup();
this.redraw();
return this.layer;
}
redraw(){
this.layer.clearLayers();
this.dataLayers.forEach(element => {
element.addMarker(this.layer);
});
}
isBase(){
return false;
}
}
/**
* Tile Layer
* @param lon: Longitude of the marker
*/
@Directive({
selector: 'mapboxlayer',
})
export class MapboxLayer extends LeafLayerBase{
@Input() name:string;
@Input() owner:string;
@Input() id:string;
@Input() token:string;
@Input() minzoom:number = 1;
@Input() maxzoom:number = 20;
getLayer(){
let url = "https://api.mapbox.com/styles/v1/"+this.owner+"/"+this.id+"/tiles/256/{z}/{x}/{y}?access_token="+this.token;
console.log(url);
let attribution = "";
return L.tileLayer(url, {minZoom: this.minzoom, maxZoom: this.maxzoom, attribution: attribution});
}
isBase(){
return true;
}
}
/**
* Tile Layer
* @param lon: Longitude of the marker
*/
@Directive({
selector: 'tile_layer',
})
export class BaseLayer extends LeafLayerBase{
@Input() name:string;
@Input() url:string;
@Input() attribution:string;
@Input() minzoom:number = 1;
@Input() maxzoom:number = 20;
getLayer(){
return L.tileLayer(this.url, {minZoom: this.minzoom, maxZoom: this.maxzoom, attribution: this.attribution});
}
isBase(){
return true;
}
}
/**
* Standard Tile Layer
* @param name: one of "osm", "bing", "google", ""
*/
@Directive({
selector: 'namedlayer',
})
export class NamedLayer extends LeafLayerBase {
@Input() layer:string;
configs = {
osms:{name:"OpenStreetMap", url:"https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png", attribution:"Map data © <a href=\"http://openstreetmap.org\">OpenStreetMap</a> contributors", minzoom:1, maxzoom:19},
osm:{name:"OpenStreetMap", url:"http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png", attribution:"Map data © <a href=\"http://openstreetmap.org\">OpenStreetMap</a> contributors", minzoom:1, maxzoom:19},
positron:{name:"Carto Positron", url:"http://{s}.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png", attribution:'© <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors, © <a href="https://carto.com/attributions">CARTO</a>', minzoom:1, maxzoom:19},
darkmatter:{name:"Carto Positron", url:"http://{s}.basemaps.cartocdn.com/dark_all/{z}/{x}/{y}.png", attribution:'© <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors, © <a href="https://carto.com/attributions">CARTO</a>', minzoom:1, maxzoom:19},
};
getLayer(){
if(Object.keys(this.configs).indexOf(this.layer) >= 0){
let lyr = this.configs[this.layer];
return L.tileLayer(lyr.url, {minZoom: lyr.minzoom, maxZoom: lyr.maxzoom, attribution: lyr.attribution});
}
return null;
}
isBase(){
return true;
}
getName(){
if(this.layer in this.configs){
return this.configs[this.layer].name;
}
return "";
}
}
@Directive({
selector: 'datalayer',
})
export class DataLayer extends LeafLayerBase {
@Input() type:string;
@Input() mode:string;
@Input() src:string;
@Input() aggregator:string;
@Input() field:string;
@Input() basestyle:any={};
@Input() propertystyle:any={};
@Input() styledproperty:string;
@Output() areaclick = new EventEmitter<any>();
constructor(private http:Http){
super();
}
the_style(basestyle, styledproperty, propertystyle){
return function(feature){
let gstyle = basestyle;
let v = feature.properties[styledproperty];
let astyle = propertystyle[v];
Object.assign(gstyle, astyle);
return gstyle;
}
}
getLayer():Promise<L.Layer>{
if (this.type == "geojson")
return new Promise<L.Layer>((resolve, react) =>{
this.http.get(this.aggregator).toPromise().then(x=>{
console.log(x);
resolve(L.geoJSON(x.json(), {
style:this.the_style(this.basestyle, this.styledproperty, this.propertystyle),
onEachFeature:(feature, lyr) => {
lyr.on({
click:(e)=>{
this.areaclick.emit({
field:feature.properties[this.field],
feature:feature
});
}
});
}
}));
});
});
return null;
}
isBase(){
return false;
}
addToMap(m, bls, dls){
this.getLayer().then(x=>{
m.addLayer(x);
dls.push(x);
});
}
}
/**
* Tile Layer
* @param lon: Longitude of the marker
*/
@Directive({
selector: 'cityosbglayer',
})
export class CityOSBackgroundLayer extends LeafLayerBase{
@Input() conf:any;
name:string;
url:string;
attribution:string;
minzoom:number = 1;
maxzoom:number = 20;
ngOnInit(){
this.name = this.conf.name;
this.url = this.conf.url;
this.attribution = this.conf.attribution;
}
getLayer(){
return L.tileLayer(this.url, {minZoom: this.minzoom, maxZoom: this.maxzoom, attribution: this.attribution});
}
isBase(){
return true;
}
}
@Directive({
selector: 'cityoslayer',
})
export class CityOSLayer extends LeafLayerBase {
@Input() mappingSpace:number;
@Output() itemclick = new EventEmitter<any>();
items;
styles;
int_styles = {}
constructor(private bms:BackendManagerService){
super();
}
getLayer():Promise<L.Layer>{
return new Promise<L.Layer>((resolve, react) =>{
this.bms.setPaging(false).setActiveApp("spaces/"+this.mappingSpace+"/styles").getAll().then(s=>{
this.styles = s;
for(let style of this.styles){
this.int_styles[style.slug] = style;
}
console.log(this.int_styles);
this.bms.setPaging(false).setActiveApp("spaces/"+this.mappingSpace+"/geolocations").getAll().then(x=>{
console.log(x);
let geoj = L.geoJSON(x , {
style:(feature =>{
return this.int_styles[feature.properties.types[0]];
}),
});
resolve( geoj );
});
});
});
}
is | {
return false;
}
addToMap(m, bls, dls){
this.getLayer().then(x=>{
m.addLayer(x);
dls["CityOS"] = x;
});
}
}
@Component({
selector: '[mn-map]',
templateUrl: './mn-map.component.html',
styleUrls: ['./mn-map.component.css'],
})
export class MnMapComponent {
private makeid() {
var text = "";
var possible = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
for( var i=0; i < 5; i++ )
text += possible.charAt(Math.floor(Math.random() * possible.length));
return text;
}
@Input() conf:any;
@Input() map_id:string;
@Input() center:number[] = [51.505,-0.09];
@Input() minzoom:number = 0;
@Input() maxzoom:number = 20;
@Input() startzoom:number = 13;
@Input() controls = true;
@Input() scrollZoom = false;
@Input() zoomControl = true;
@ContentChildren(BaseLayer) baseLayers: QueryList<LeafLayer>;
@ContentChildren(NamedLayer) namedLayers: QueryList<LeafLayer>;
@ContentChildren(DataLayer) dataLayers: QueryList<LeafLayer>;
@ContentChildren(MarkerLayer) markerLayers: QueryList<LeafLayer>;
@ContentChildren(CityOSLayer) cityoslayer: QueryList<LeafLayer>;
@ContentChildren(CityOSBackgroundLayer) cityosbglayer: QueryList<LeafLayer>;
@ContentChildren(MapboxLayer) mapboxLayers: QueryList<LeafLayer>;
@Output() click:EventEmitter<any> = new EventEmitter();
@Output() movestart:EventEmitter<any> = new EventEmitter();
@Output() moveend:EventEmitter<any> = new EventEmitter();
public map;
layers:Array<LeafLayer> = [];
private addLayer(layer:LeafLayer){
this.layers.push(layer);
}
grid_unit:number = 170;
grid_gutter:number = 15;
constructor(private elementRef: ElementRef){
if(this.map_id == null)
this.map_id = this.makeid();
}
protected prepareLayers(){
this.baseLayers.forEach(element => {
this.addLayer(element);
});
this.namedLayers.forEach(element => {
this.addLayer(element);
});
this.dataLayers.forEach(element => {
this.addLayer(element);
});
this.markerLayers.forEach(element => {
this.addLayer(element);
});
this.mapboxLayers.forEach(element => {
this.addLayer(element);
});
this.cityoslayer.forEach(element => {
this.addLayer(element);
});
this.cityosbglayer.forEach(element => {
this.addLayer(element);
});
}
ngAfterViewInit() {
try{
this.map = L.map(this.map_id, {
minZoom:this.minzoom,
maxZoom:this.maxzoom,
scrollWheelZoom:this.scrollZoom,
zoomControl: this.zoomControl
}).setView([this.center[0], this.center[1]], this.startzoom);
this.prepareLayers();
let bls = {};
let dls = {};
for(let lyr of this.layers){
lyr.addToMap(this.map, bls, dls);
}
this.map._onResize();
if(this.controls)
L.control.layers(bls, dls).addTo(this.map);
} catch (ex){
console.log(ex);
}
}
}
| Base() | identifier_name |
world.py | """Define availible tiles and their actions, and build level from map file"""
import sys
import random
import decimal
import src.npc as npc
import src.enemies as enemies
class MapTile:
def __init__(self, x, y):
self.x = x
self.y = y
self.visited = 0
self.type = ''
def intro_text(self):
raise NotImplementedError("Create a subclass instead!")
def modify_player(self, player):
pass
class StartTile(MapTile):
def intro_text(self):
print("\n ^ ^ ^ ^ ___I_ ^ ^ ^ ^ ^ ^ ^")
print(" /|\\/|\\/|\\ /|\\ /\\-_--\\ /|\\/|\\ /|\\/|\\/|\\ /|\\/|\\")
print(" /|\\/|\\/|\\ /|\\ / \\_-__\\ /|\\/|\\ /|\\/|\\/|\\ /|\\/|\\")
print(" /|\\/|\\/|\\ /|\\ |[]| [] | /|\\/|\\ /|\\/|\\/|\\ /|\\/|\\")
print("\n LEVEL 1: The Forest")
print("\n You chase \"The Villain\" to the edge of a forest.")
print(" Looks like you are going to have to head in after him....")
class BoringTile(MapTile):
def intro_text(self):
print("\n v . ._, |_ .,")
print(" `-._\\/ . \\ / |/_")
print(" \\ _\\, y | \\//")
print(" _\\_.___\\, \\/ -.\\||")
print(" `7-,--.`._|| / / ,")
print(" /' `-. `./ / |/_.'")
print(" | |//")
print(" |_ /")
print(" |- |")
print(" | =|")
print(" | |")
print(" --------------------/ , . \\--------._")
print("\n This is a very boring part of the forest. Fuck all happens here")
class VictoryTile(MapTile):
def modify_player(self, player):
player.victory = True
exit()
def intro_text(self):
print("\n .''.")
print(" .''. *''* :_\/_: .")
print(" :_\/_: . .:.*_\/_* : /\ : .'.:.'.")
print(" .''.: /\ : _\(/_ ':'* /\ * : '..'. -=:o:=-")
print(" :_\/_:'.:::. /)\*''* .|.* '.\'/.'_\(/_'.':'.'")
print(" : /\ : ::::: '*_\/_* | | -= o =- /)\ ' *")
print(" '..' ':::' * /\ * |'| .'/.\'. '._____")
print(" * __*..* | | : |. |' .---\"|")
print(" _* .-' '-. | | .--'| || | _| |")
print(" .-'| _.| | || '-__ | | | || |")
print(" |' | |. | || | | | | || |")
print(" ____| '-' ' "" '-' '-.' '` |____")
print(" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ")
print("\n You Saved \"The Girl\"!")
print("\n You whisk her unto your arms and dissaper in to the sunset!")
print(" Lets hope she makes it worth your while ;)")
print("\n\n Thanks for playing the game!")
class EnemyTile(MapTile):
def __init__(self, x, y):
encounter_type = random.random()
if encounter_type < 0.30:
self.enemy = enemies.GiantSpider()
self.alive_text = "\nA giant spider jumps down from " \
"its web and lands right in front " \
"of you!"
self.dead_text = "\nThe lifeless corpse of the spider " \
"slumps in the corner. Creepy."
elif encounter_type < 0.60:
self.enemy = enemies.Goblin()
self.alive_text = "\nA nasty lttile goblin leaps out at you" \
"and waves his stabby daggar at you!"
self.dead_text = "\nThe gblin exploded all over the walls." \
"I'm not cleaning that up.'"
elif encounter_type < 0.80:
self.enemy = enemies.Ogre()
self.alive_text = "\nA ogre blocks your path!"
self.dead_text = "\nThe oger died convinietly off of " \
"the path, out of the way."
elif encounter_type < 0.95:
self.enemy = enemies.BatColony()
self.alive_text = "\nBats. Eeshk..."
self.dead_text = "\nThe furry bastards are dead"
else:
self.enemy = enemies.RockMonster()
self.alive_text = "\nIs it a bird? Is it a plane? no " \
"it's a rock monster!"
self.dead_text = "\nYou killed a rock. " \
"Now thats dedication!!"
self.enemy.hp = self.enemy.randomise_stats(self.enemy.hp)
self.enemy.damage = self.enemy.randomise_stats(self.enemy.damage)
self.enemy.loot = self.enemy.randomise_stats(self.enemy.loot)
super().__init__(x, y)
def intro_text(self):
if self.enemy.is_alive():
print("\n ___________.___ ________ ___ ___ ___________._.")
print(" \\_ _____/| | / _____/ / | \\ \\__ ___/| |")
print(" | __) | |/ \\ ___ / ~ \\ | | | |")
print(" | \\ | |\\ \\_\\ \\\\ Y / | | \\|")
print(" \\___ / |___| \\______ / \\___|_ / |____| __")
print(" \\/ \\/ \\/ \\/")
print(self.alive_text)
print("{} has {} HP".format(self.enemy.name, self.enemy.hp))
else:
print(" ____ ____.___ _________ ___________________ __________ _____.___.._.")
print(" \\ \\ / /| |\\_ ___ \\ \\__ ___/\\_____ \\ \\______ \\\\__ | || |")
print(" \\ Y / | |/ \\ \\/ | | / | \\ | _/ / | || |")
print(" \\ / | |\\ \\____ | | / | \\ | | \\ \\____ | \\|")
print(" \\___/ |___| \\______ / |____| \\_______ / |____|_ / / ______| __")
print(" \\/ \\/ \\/ \\/ \\/")
print(self.dead_text)
def modify_player(self, player):
|
class TraderTile(MapTile):
def __init__(self, x, y):
self.trader = npc.Trader()
super().__init__(x, y)
def trade(self, buyer, seller):
if buyer.name == "Trader":
action = "sell"
buyer_char = "Trader"
else:
action = "buy"
buyer_char = "Player"
seller_inventory = []
item_choice = None
for item_cat, item_attr in seller.inventory['Items'].items():
if item_attr["qty"] > 0:
seller_inventory.append(item_attr["obj"])
print("\nTrading Items")
print("----------------\n")
if not seller_inventory:
print("There are no items to sell!")
else:
for i, item in enumerate(seller_inventory, 1):
print(" {}: {}".format(i, item.name))
print("\nq: Cancel trade")
while item_choice not in seller_inventory:
item_choice = input("\nWhich item do you want to {}? ".format(action))
if item_choice in ['Q', 'q']:
if buyer_char == "Player":
buyer.room.visited = 0
else:
seller.room.visited = 0
return
else:
try:
to_swap = seller_inventory[int(item_choice) - 1]
self.swap(seller, buyer, to_swap)
except (ValueError, IndexError):
print("Invalid choice!")
def swap(self, seller, buyer, item):
if buyer.name == "Trader":
action = "sell"
buyer_char = "Trader"
else:
action = "buy"
buyer_char = "Player"
if item.value > buyer.gold:
print("That's too expensive!")
self.trade(buyer, seller)
for item_cat, item_attr in seller.inventory['Items'].items():
if item_cat == item.name:
item_attr["qty"] -= 1
if item.name in buyer.inventory['Items'].items():
for item_cat, item_attr in buyer.inventory['Items'].items():
if item_cat == item.name:
item_attr["qty"] += 1
else:
buyer.inventory['Items'][item.name] = {}
buyer.inventory['Items'][item.name]['obj'] = item
buyer.inventory['Items'][item.name]['qty'] = 1
seller.gold = seller.gold + item.value
buyer.gold = buyer.gold - item.value
print("Trade complete!")
def check_if_trade(self, player):
while True:
if len(self.trader.inventory) == 0:
print("No items to trade!")
player.room.visited = 0
return
user_input = input("Would you like to (B)uy, (S)ell, or (Q)uit?: ")
if user_input in ['Q', 'q']:
player.room.visited = 0
return
elif user_input in ['B', 'b']:
print("Here's whats available to buy:\n")
self.trade(buyer=player, seller=self.trader)
elif user_input in ['S', 's']:
print("Here's whats available to sell:\n")
self.trade(buyer=self.trader, seller=player)
else:
print("Invalid choice!")
def intro_text(self):
print("\n _________##")
print(" @\\\\\\\\\\\\\\\\\\##")
print(" @@@\\\\\\\\\\\\\\\\##\\")
print(" @@ @@\\\\\\\\\\\\\\\\\\\\\\")
print(" @@@@@@@\\\\\\\\\\\\\\\\\\\\\\")
print(" @@@@@@@@@----------|")
print(" @@ @@@ @@__________|")
print(" @@@@@@@@@__________|")
print(" @@@@ .@@@__________|")
print(" _\|/__@@@@__@@@__________|__")
print("\n Trading Post")
print("\n Press \"T\" to trade")
class FindGoldTile(MapTile):
def __init__(self, x, y):
self.gold = random.randint(20, 75)
self.gold_claimed = False
super().__init__(x, y)
def modify_player(self, player):
if not self.gold_claimed:
self.gold_claimed = True
luc_mod = decimal.Decimal(player.luc_stat / 100)
found_loot = round(self.gold * luc_mod, 0)
player.gold += found_loot
print("\n You found {} gold coins!".format(found_loot))
def intro_text(self):
print("\n |#######=====================#######|")
print(" |#(1)*UNITED STATES OF WHAYEVER*(1)#|")
print(" |#** /===\ ******** **#|")
print(" |*# {G} | (\") | #*|")
print(" |#* ****** | /v\ | O N E *#|")
print(" |#(1) \===/ (1)#|")
print(" |##===========SOME GOLD===========##|")
if self.gold_claimed:
print("\n You've already looted this place!")
else:
print("\n Someone dropped some gold. You pick it up.")
start_tile_location = None
tile_type_dict = {"VT": VictoryTile,
"EN": EnemyTile,
"ST": StartTile,
"NA": BoringTile,
"FG": FindGoldTile,
"TT": TraderTile,
" ": None}
def is_dsl_valid(dsl):
if dsl.count("|ST|") != 1:
return False
if dsl.count("|VT|") == 0:
return False
lines = dsl.splitlines()
lines = [l for l in lines if l]
pipe_counts = [line.count("|") for line in lines]
for count in pipe_counts:
if count != pipe_counts[0]:
return False
return True
def parse_world_dsl(map_file):
world_map = []
level_map = open(map_file, 'r').read()
if not is_dsl_valid(level_map):
sys.exit("Rumtime error: unable to parse map file")
dsl_lines = level_map.splitlines()
dsl_lines = [x for x in dsl_lines if x]
for y, dsl_row in enumerate(dsl_lines):
row = []
dsl_cells = dsl_row.split("|")
dsl_cells = [c for c in dsl_cells if c]
for x, dsl_cell in enumerate(dsl_cells):
if dsl_cell not in tile_type_dict:
sys.exit("Map parse error: Invalid room type in map")
break
tile_type = tile_type_dict[dsl_cell]
tile = None
if tile_type is not None:
tile = tile_type(x, y)
if tile_type == StartTile:
global start_tile_location
start_tile_location = x, y
row.append(tile)
world_map.append(row)
return world_map
| if self.enemy.is_alive():
dex_mod = decimal.Decimal(player.dex_stat / 100)
dodge_chance = decimal.Decimal(random.random()) * dex_mod
miss_chance = decimal.Decimal(random.random()) * dex_mod
if miss_chance > 0.98:
print("The {} missed!".format(self.enemy.name))
elif dodge_chance > 0.98:
print("You dodged the attack!")
else:
def_mod = decimal.Decimal(2 - (player.def_stat / 100))
enemy_damage = round(self.enemy.damage * def_mod, 0)
player.curr_hp -= enemy_damage
print("The {} does {} damage. You have {} HP remaining."
.format(self.enemy.name, enemy_damage, player.curr_hp)) | identifier_body |
world.py | """Define availible tiles and their actions, and build level from map file"""
import sys
import random
import decimal
import src.npc as npc
import src.enemies as enemies
class MapTile:
def __init__(self, x, y):
self.x = x
self.y = y
self.visited = 0
self.type = ''
def intro_text(self):
raise NotImplementedError("Create a subclass instead!")
def modify_player(self, player):
pass
class StartTile(MapTile):
def intro_text(self):
print("\n ^ ^ ^ ^ ___I_ ^ ^ ^ ^ ^ ^ ^")
print(" /|\\/|\\/|\\ /|\\ /\\-_--\\ /|\\/|\\ /|\\/|\\/|\\ /|\\/|\\")
print(" /|\\/|\\/|\\ /|\\ / \\_-__\\ /|\\/|\\ /|\\/|\\/|\\ /|\\/|\\")
print(" /|\\/|\\/|\\ /|\\ |[]| [] | /|\\/|\\ /|\\/|\\/|\\ /|\\/|\\")
print("\n LEVEL 1: The Forest")
print("\n You chase \"The Villain\" to the edge of a forest.")
print(" Looks like you are going to have to head in after him....")
class BoringTile(MapTile):
def | (self):
print("\n v . ._, |_ .,")
print(" `-._\\/ . \\ / |/_")
print(" \\ _\\, y | \\//")
print(" _\\_.___\\, \\/ -.\\||")
print(" `7-,--.`._|| / / ,")
print(" /' `-. `./ / |/_.'")
print(" | |//")
print(" |_ /")
print(" |- |")
print(" | =|")
print(" | |")
print(" --------------------/ , . \\--------._")
print("\n This is a very boring part of the forest. Fuck all happens here")
class VictoryTile(MapTile):
def modify_player(self, player):
player.victory = True
exit()
def intro_text(self):
print("\n .''.")
print(" .''. *''* :_\/_: .")
print(" :_\/_: . .:.*_\/_* : /\ : .'.:.'.")
print(" .''.: /\ : _\(/_ ':'* /\ * : '..'. -=:o:=-")
print(" :_\/_:'.:::. /)\*''* .|.* '.\'/.'_\(/_'.':'.'")
print(" : /\ : ::::: '*_\/_* | | -= o =- /)\ ' *")
print(" '..' ':::' * /\ * |'| .'/.\'. '._____")
print(" * __*..* | | : |. |' .---\"|")
print(" _* .-' '-. | | .--'| || | _| |")
print(" .-'| _.| | || '-__ | | | || |")
print(" |' | |. | || | | | | || |")
print(" ____| '-' ' "" '-' '-.' '` |____")
print(" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ")
print("\n You Saved \"The Girl\"!")
print("\n You whisk her unto your arms and dissaper in to the sunset!")
print(" Lets hope she makes it worth your while ;)")
print("\n\n Thanks for playing the game!")
class EnemyTile(MapTile):
def __init__(self, x, y):
encounter_type = random.random()
if encounter_type < 0.30:
self.enemy = enemies.GiantSpider()
self.alive_text = "\nA giant spider jumps down from " \
"its web and lands right in front " \
"of you!"
self.dead_text = "\nThe lifeless corpse of the spider " \
"slumps in the corner. Creepy."
elif encounter_type < 0.60:
self.enemy = enemies.Goblin()
self.alive_text = "\nA nasty lttile goblin leaps out at you" \
"and waves his stabby daggar at you!"
self.dead_text = "\nThe gblin exploded all over the walls." \
"I'm not cleaning that up.'"
elif encounter_type < 0.80:
self.enemy = enemies.Ogre()
self.alive_text = "\nA ogre blocks your path!"
self.dead_text = "\nThe oger died convinietly off of " \
"the path, out of the way."
elif encounter_type < 0.95:
self.enemy = enemies.BatColony()
self.alive_text = "\nBats. Eeshk..."
self.dead_text = "\nThe furry bastards are dead"
else:
self.enemy = enemies.RockMonster()
self.alive_text = "\nIs it a bird? Is it a plane? no " \
"it's a rock monster!"
self.dead_text = "\nYou killed a rock. " \
"Now thats dedication!!"
self.enemy.hp = self.enemy.randomise_stats(self.enemy.hp)
self.enemy.damage = self.enemy.randomise_stats(self.enemy.damage)
self.enemy.loot = self.enemy.randomise_stats(self.enemy.loot)
super().__init__(x, y)
def intro_text(self):
if self.enemy.is_alive():
print("\n ___________.___ ________ ___ ___ ___________._.")
print(" \\_ _____/| | / _____/ / | \\ \\__ ___/| |")
print(" | __) | |/ \\ ___ / ~ \\ | | | |")
print(" | \\ | |\\ \\_\\ \\\\ Y / | | \\|")
print(" \\___ / |___| \\______ / \\___|_ / |____| __")
print(" \\/ \\/ \\/ \\/")
print(self.alive_text)
print("{} has {} HP".format(self.enemy.name, self.enemy.hp))
else:
print(" ____ ____.___ _________ ___________________ __________ _____.___.._.")
print(" \\ \\ / /| |\\_ ___ \\ \\__ ___/\\_____ \\ \\______ \\\\__ | || |")
print(" \\ Y / | |/ \\ \\/ | | / | \\ | _/ / | || |")
print(" \\ / | |\\ \\____ | | / | \\ | | \\ \\____ | \\|")
print(" \\___/ |___| \\______ / |____| \\_______ / |____|_ / / ______| __")
print(" \\/ \\/ \\/ \\/ \\/")
print(self.dead_text)
def modify_player(self, player):
if self.enemy.is_alive():
dex_mod = decimal.Decimal(player.dex_stat / 100)
dodge_chance = decimal.Decimal(random.random()) * dex_mod
miss_chance = decimal.Decimal(random.random()) * dex_mod
if miss_chance > 0.98:
print("The {} missed!".format(self.enemy.name))
elif dodge_chance > 0.98:
print("You dodged the attack!")
else:
def_mod = decimal.Decimal(2 - (player.def_stat / 100))
enemy_damage = round(self.enemy.damage * def_mod, 0)
player.curr_hp -= enemy_damage
print("The {} does {} damage. You have {} HP remaining."
.format(self.enemy.name, enemy_damage, player.curr_hp))
class TraderTile(MapTile):
def __init__(self, x, y):
self.trader = npc.Trader()
super().__init__(x, y)
def trade(self, buyer, seller):
if buyer.name == "Trader":
action = "sell"
buyer_char = "Trader"
else:
action = "buy"
buyer_char = "Player"
seller_inventory = []
item_choice = None
for item_cat, item_attr in seller.inventory['Items'].items():
if item_attr["qty"] > 0:
seller_inventory.append(item_attr["obj"])
print("\nTrading Items")
print("----------------\n")
if not seller_inventory:
print("There are no items to sell!")
else:
for i, item in enumerate(seller_inventory, 1):
print(" {}: {}".format(i, item.name))
print("\nq: Cancel trade")
while item_choice not in seller_inventory:
item_choice = input("\nWhich item do you want to {}? ".format(action))
if item_choice in ['Q', 'q']:
if buyer_char == "Player":
buyer.room.visited = 0
else:
seller.room.visited = 0
return
else:
try:
to_swap = seller_inventory[int(item_choice) - 1]
self.swap(seller, buyer, to_swap)
except (ValueError, IndexError):
print("Invalid choice!")
def swap(self, seller, buyer, item):
if buyer.name == "Trader":
action = "sell"
buyer_char = "Trader"
else:
action = "buy"
buyer_char = "Player"
if item.value > buyer.gold:
print("That's too expensive!")
self.trade(buyer, seller)
for item_cat, item_attr in seller.inventory['Items'].items():
if item_cat == item.name:
item_attr["qty"] -= 1
if item.name in buyer.inventory['Items'].items():
for item_cat, item_attr in buyer.inventory['Items'].items():
if item_cat == item.name:
item_attr["qty"] += 1
else:
buyer.inventory['Items'][item.name] = {}
buyer.inventory['Items'][item.name]['obj'] = item
buyer.inventory['Items'][item.name]['qty'] = 1
seller.gold = seller.gold + item.value
buyer.gold = buyer.gold - item.value
print("Trade complete!")
def check_if_trade(self, player):
while True:
if len(self.trader.inventory) == 0:
print("No items to trade!")
player.room.visited = 0
return
user_input = input("Would you like to (B)uy, (S)ell, or (Q)uit?: ")
if user_input in ['Q', 'q']:
player.room.visited = 0
return
elif user_input in ['B', 'b']:
print("Here's whats available to buy:\n")
self.trade(buyer=player, seller=self.trader)
elif user_input in ['S', 's']:
print("Here's whats available to sell:\n")
self.trade(buyer=self.trader, seller=player)
else:
print("Invalid choice!")
def intro_text(self):
print("\n _________##")
print(" @\\\\\\\\\\\\\\\\\\##")
print(" @@@\\\\\\\\\\\\\\\\##\\")
print(" @@ @@\\\\\\\\\\\\\\\\\\\\\\")
print(" @@@@@@@\\\\\\\\\\\\\\\\\\\\\\")
print(" @@@@@@@@@----------|")
print(" @@ @@@ @@__________|")
print(" @@@@@@@@@__________|")
print(" @@@@ .@@@__________|")
print(" _\|/__@@@@__@@@__________|__")
print("\n Trading Post")
print("\n Press \"T\" to trade")
class FindGoldTile(MapTile):
def __init__(self, x, y):
self.gold = random.randint(20, 75)
self.gold_claimed = False
super().__init__(x, y)
def modify_player(self, player):
if not self.gold_claimed:
self.gold_claimed = True
luc_mod = decimal.Decimal(player.luc_stat / 100)
found_loot = round(self.gold * luc_mod, 0)
player.gold += found_loot
print("\n You found {} gold coins!".format(found_loot))
def intro_text(self):
print("\n |#######=====================#######|")
print(" |#(1)*UNITED STATES OF WHAYEVER*(1)#|")
print(" |#** /===\ ******** **#|")
print(" |*# {G} | (\") | #*|")
print(" |#* ****** | /v\ | O N E *#|")
print(" |#(1) \===/ (1)#|")
print(" |##===========SOME GOLD===========##|")
if self.gold_claimed:
print("\n You've already looted this place!")
else:
print("\n Someone dropped some gold. You pick it up.")
start_tile_location = None
tile_type_dict = {"VT": VictoryTile,
"EN": EnemyTile,
"ST": StartTile,
"NA": BoringTile,
"FG": FindGoldTile,
"TT": TraderTile,
" ": None}
def is_dsl_valid(dsl):
if dsl.count("|ST|") != 1:
return False
if dsl.count("|VT|") == 0:
return False
lines = dsl.splitlines()
lines = [l for l in lines if l]
pipe_counts = [line.count("|") for line in lines]
for count in pipe_counts:
if count != pipe_counts[0]:
return False
return True
def parse_world_dsl(map_file):
world_map = []
level_map = open(map_file, 'r').read()
if not is_dsl_valid(level_map):
sys.exit("Rumtime error: unable to parse map file")
dsl_lines = level_map.splitlines()
dsl_lines = [x for x in dsl_lines if x]
for y, dsl_row in enumerate(dsl_lines):
row = []
dsl_cells = dsl_row.split("|")
dsl_cells = [c for c in dsl_cells if c]
for x, dsl_cell in enumerate(dsl_cells):
if dsl_cell not in tile_type_dict:
sys.exit("Map parse error: Invalid room type in map")
break
tile_type = tile_type_dict[dsl_cell]
tile = None
if tile_type is not None:
tile = tile_type(x, y)
if tile_type == StartTile:
global start_tile_location
start_tile_location = x, y
row.append(tile)
world_map.append(row)
return world_map
| intro_text | identifier_name |
world.py | """Define availible tiles and their actions, and build level from map file"""
import sys
import random
import decimal
import src.npc as npc
import src.enemies as enemies
class MapTile:
def __init__(self, x, y):
self.x = x
self.y = y
self.visited = 0
self.type = ''
def intro_text(self):
raise NotImplementedError("Create a subclass instead!")
def modify_player(self, player):
pass
class StartTile(MapTile):
def intro_text(self):
print("\n ^ ^ ^ ^ ___I_ ^ ^ ^ ^ ^ ^ ^")
print(" /|\\/|\\/|\\ /|\\ /\\-_--\\ /|\\/|\\ /|\\/|\\/|\\ /|\\/|\\")
print(" /|\\/|\\/|\\ /|\\ / \\_-__\\ /|\\/|\\ /|\\/|\\/|\\ /|\\/|\\")
print(" /|\\/|\\/|\\ /|\\ |[]| [] | /|\\/|\\ /|\\/|\\/|\\ /|\\/|\\")
print("\n LEVEL 1: The Forest")
print("\n You chase \"The Villain\" to the edge of a forest.")
print(" Looks like you are going to have to head in after him....")
class BoringTile(MapTile):
def intro_text(self):
print("\n v . ._, |_ .,")
print(" `-._\\/ . \\ / |/_")
print(" \\ _\\, y | \\//")
print(" _\\_.___\\, \\/ -.\\||")
print(" `7-,--.`._|| / / ,")
print(" /' `-. `./ / |/_.'")
print(" | |//")
print(" |_ /")
print(" |- |")
print(" | =|")
print(" | |")
print(" --------------------/ , . \\--------._")
print("\n This is a very boring part of the forest. Fuck all happens here")
class VictoryTile(MapTile):
def modify_player(self, player):
player.victory = True
exit()
def intro_text(self):
print("\n .''.")
print(" .''. *''* :_\/_: .")
print(" :_\/_: . .:.*_\/_* : /\ : .'.:.'.")
print(" .''.: /\ : _\(/_ ':'* /\ * : '..'. -=:o:=-")
print(" :_\/_:'.:::. /)\*''* .|.* '.\'/.'_\(/_'.':'.'")
print(" : /\ : ::::: '*_\/_* | | -= o =- /)\ ' *")
print(" '..' ':::' * /\ * |'| .'/.\'. '._____")
print(" * __*..* | | : |. |' .---\"|")
print(" _* .-' '-. | | .--'| || | _| |")
print(" .-'| _.| | || '-__ | | | || |")
print(" |' | |. | || | | | | || |")
print(" ____| '-' ' "" '-' '-.' '` |____")
print(" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ")
print("\n You Saved \"The Girl\"!")
print("\n You whisk her unto your arms and dissaper in to the sunset!")
print(" Lets hope she makes it worth your while ;)")
print("\n\n Thanks for playing the game!")
class EnemyTile(MapTile):
def __init__(self, x, y):
encounter_type = random.random()
if encounter_type < 0.30:
self.enemy = enemies.GiantSpider()
self.alive_text = "\nA giant spider jumps down from " \
"its web and lands right in front " \
"of you!"
self.dead_text = "\nThe lifeless corpse of the spider " \
"slumps in the corner. Creepy."
elif encounter_type < 0.60:
self.enemy = enemies.Goblin()
self.alive_text = "\nA nasty lttile goblin leaps out at you" \
"and waves his stabby daggar at you!"
self.dead_text = "\nThe gblin exploded all over the walls." \
"I'm not cleaning that up.'"
elif encounter_type < 0.80:
self.enemy = enemies.Ogre()
self.alive_text = "\nA ogre blocks your path!"
self.dead_text = "\nThe oger died convinietly off of " \
"the path, out of the way."
elif encounter_type < 0.95:
self.enemy = enemies.BatColony()
self.alive_text = "\nBats. Eeshk..."
self.dead_text = "\nThe furry bastards are dead"
else:
self.enemy = enemies.RockMonster()
self.alive_text = "\nIs it a bird? Is it a plane? no " \
"it's a rock monster!"
self.dead_text = "\nYou killed a rock. " \
"Now thats dedication!!"
self.enemy.hp = self.enemy.randomise_stats(self.enemy.hp)
self.enemy.damage = self.enemy.randomise_stats(self.enemy.damage)
self.enemy.loot = self.enemy.randomise_stats(self.enemy.loot)
super().__init__(x, y)
def intro_text(self):
if self.enemy.is_alive():
print("\n ___________.___ ________ ___ ___ ___________._.")
print(" \\_ _____/| | / _____/ / | \\ \\__ ___/| |")
print(" | __) | |/ \\ ___ / ~ \\ | | | |")
print(" | \\ | |\\ \\_\\ \\\\ Y / | | \\|")
print(" \\___ / |___| \\______ / \\___|_ / |____| __")
print(" \\/ \\/ \\/ \\/")
print(self.alive_text)
print("{} has {} HP".format(self.enemy.name, self.enemy.hp))
else:
print(" ____ ____.___ _________ ___________________ __________ _____.___.._.")
print(" \\ \\ / /| |\\_ ___ \\ \\__ ___/\\_____ \\ \\______ \\\\__ | || |")
print(" \\ Y / | |/ \\ \\/ | | / | \\ | _/ / | || |")
print(" \\ / | |\\ \\____ | | / | \\ | | \\ \\____ | \\|")
print(" \\___/ |___| \\______ / |____| \\_______ / |____|_ / / ______| __")
print(" \\/ \\/ \\/ \\/ \\/")
print(self.dead_text)
def modify_player(self, player):
if self.enemy.is_alive():
dex_mod = decimal.Decimal(player.dex_stat / 100)
dodge_chance = decimal.Decimal(random.random()) * dex_mod
miss_chance = decimal.Decimal(random.random()) * dex_mod
if miss_chance > 0.98:
print("The {} missed!".format(self.enemy.name))
elif dodge_chance > 0.98:
print("You dodged the attack!")
else:
def_mod = decimal.Decimal(2 - (player.def_stat / 100))
enemy_damage = round(self.enemy.damage * def_mod, 0)
player.curr_hp -= enemy_damage
print("The {} does {} damage. You have {} HP remaining."
.format(self.enemy.name, enemy_damage, player.curr_hp))
class TraderTile(MapTile):
def __init__(self, x, y):
self.trader = npc.Trader()
super().__init__(x, y)
def trade(self, buyer, seller):
if buyer.name == "Trader":
action = "sell"
buyer_char = "Trader"
else:
action = "buy"
buyer_char = "Player"
seller_inventory = []
item_choice = None
for item_cat, item_attr in seller.inventory['Items'].items():
if item_attr["qty"] > 0:
seller_inventory.append(item_attr["obj"])
print("\nTrading Items")
print("----------------\n")
if not seller_inventory:
print("There are no items to sell!")
else:
for i, item in enumerate(seller_inventory, 1):
print(" {}: {}".format(i, item.name))
print("\nq: Cancel trade")
while item_choice not in seller_inventory:
item_choice = input("\nWhich item do you want to {}? ".format(action))
if item_choice in ['Q', 'q']:
if buyer_char == "Player":
buyer.room.visited = 0
else:
seller.room.visited = 0
return
else:
try:
to_swap = seller_inventory[int(item_choice) - 1]
self.swap(seller, buyer, to_swap)
except (ValueError, IndexError):
print("Invalid choice!")
def swap(self, seller, buyer, item):
if buyer.name == "Trader":
action = "sell"
buyer_char = "Trader"
else:
action = "buy"
buyer_char = "Player"
if item.value > buyer.gold:
print("That's too expensive!")
self.trade(buyer, seller)
for item_cat, item_attr in seller.inventory['Items'].items():
if item_cat == item.name:
item_attr["qty"] -= 1
if item.name in buyer.inventory['Items'].items():
for item_cat, item_attr in buyer.inventory['Items'].items():
if item_cat == item.name:
item_attr["qty"] += 1
else:
buyer.inventory['Items'][item.name] = {}
buyer.inventory['Items'][item.name]['obj'] = item
buyer.inventory['Items'][item.name]['qty'] = 1
seller.gold = seller.gold + item.value
buyer.gold = buyer.gold - item.value
print("Trade complete!")
def check_if_trade(self, player):
while True:
if len(self.trader.inventory) == 0:
print("No items to trade!")
player.room.visited = 0
return
user_input = input("Would you like to (B)uy, (S)ell, or (Q)uit?: ")
if user_input in ['Q', 'q']:
player.room.visited = 0
return
elif user_input in ['B', 'b']:
print("Here's whats available to buy:\n")
self.trade(buyer=player, seller=self.trader)
elif user_input in ['S', 's']:
print("Here's whats available to sell:\n")
self.trade(buyer=self.trader, seller=player)
else:
print("Invalid choice!")
def intro_text(self):
print("\n _________##")
print(" @\\\\\\\\\\\\\\\\\\##")
print(" @@@\\\\\\\\\\\\\\\\##\\")
print(" @@ @@\\\\\\\\\\\\\\\\\\\\\\")
print(" @@@@@@@\\\\\\\\\\\\\\\\\\\\\\")
print(" @@@@@@@@@----------|")
print(" @@ @@@ @@__________|")
print(" @@@@@@@@@__________|")
print(" @@@@ .@@@__________|")
print(" _\|/__@@@@__@@@__________|__")
print("\n Trading Post")
print("\n Press \"T\" to trade")
class FindGoldTile(MapTile):
def __init__(self, x, y):
self.gold = random.randint(20, 75)
self.gold_claimed = False
super().__init__(x, y)
def modify_player(self, player):
if not self.gold_claimed:
self.gold_claimed = True
luc_mod = decimal.Decimal(player.luc_stat / 100)
found_loot = round(self.gold * luc_mod, 0)
player.gold += found_loot
print("\n You found {} gold coins!".format(found_loot))
def intro_text(self):
print("\n |#######=====================#######|")
print(" |#(1)*UNITED STATES OF WHAYEVER*(1)#|")
print(" |#** /===\ ******** **#|")
print(" |*# {G} | (\") | #*|")
print(" |#* ****** | /v\ | O N E *#|")
print(" |#(1) \===/ (1)#|")
print(" |##===========SOME GOLD===========##|")
if self.gold_claimed:
print("\n You've already looted this place!")
else:
print("\n Someone dropped some gold. You pick it up.")
start_tile_location = None
tile_type_dict = {"VT": VictoryTile,
"EN": EnemyTile,
"ST": StartTile,
"NA": BoringTile,
"FG": FindGoldTile,
"TT": TraderTile,
" ": None}
def is_dsl_valid(dsl):
if dsl.count("|ST|") != 1:
return False
if dsl.count("|VT|") == 0:
return False
lines = dsl.splitlines()
lines = [l for l in lines if l]
pipe_counts = [line.count("|") for line in lines]
for count in pipe_counts:
if count != pipe_counts[0]:
return False
return True
def parse_world_dsl(map_file):
world_map = []
level_map = open(map_file, 'r').read()
if not is_dsl_valid(level_map):
sys.exit("Rumtime error: unable to parse map file")
dsl_lines = level_map.splitlines()
dsl_lines = [x for x in dsl_lines if x]
for y, dsl_row in enumerate(dsl_lines):
row = []
dsl_cells = dsl_row.split("|")
dsl_cells = [c for c in dsl_cells if c]
for x, dsl_cell in enumerate(dsl_cells):
if dsl_cell not in tile_type_dict:
sys.exit("Map parse error: Invalid room type in map") | tile_type = tile_type_dict[dsl_cell]
tile = None
if tile_type is not None:
tile = tile_type(x, y)
if tile_type == StartTile:
global start_tile_location
start_tile_location = x, y
row.append(tile)
world_map.append(row)
return world_map | break
| random_line_split |
world.py | """Define availible tiles and their actions, and build level from map file"""
import sys
import random
import decimal
import src.npc as npc
import src.enemies as enemies
class MapTile:
def __init__(self, x, y):
self.x = x
self.y = y
self.visited = 0
self.type = ''
def intro_text(self):
raise NotImplementedError("Create a subclass instead!")
def modify_player(self, player):
pass
class StartTile(MapTile):
def intro_text(self):
print("\n ^ ^ ^ ^ ___I_ ^ ^ ^ ^ ^ ^ ^")
print(" /|\\/|\\/|\\ /|\\ /\\-_--\\ /|\\/|\\ /|\\/|\\/|\\ /|\\/|\\")
print(" /|\\/|\\/|\\ /|\\ / \\_-__\\ /|\\/|\\ /|\\/|\\/|\\ /|\\/|\\")
print(" /|\\/|\\/|\\ /|\\ |[]| [] | /|\\/|\\ /|\\/|\\/|\\ /|\\/|\\")
print("\n LEVEL 1: The Forest")
print("\n You chase \"The Villain\" to the edge of a forest.")
print(" Looks like you are going to have to head in after him....")
class BoringTile(MapTile):
def intro_text(self):
print("\n v . ._, |_ .,")
print(" `-._\\/ . \\ / |/_")
print(" \\ _\\, y | \\//")
print(" _\\_.___\\, \\/ -.\\||")
print(" `7-,--.`._|| / / ,")
print(" /' `-. `./ / |/_.'")
print(" | |//")
print(" |_ /")
print(" |- |")
print(" | =|")
print(" | |")
print(" --------------------/ , . \\--------._")
print("\n This is a very boring part of the forest. Fuck all happens here")
class VictoryTile(MapTile):
def modify_player(self, player):
player.victory = True
exit()
def intro_text(self):
print("\n .''.")
print(" .''. *''* :_\/_: .")
print(" :_\/_: . .:.*_\/_* : /\ : .'.:.'.")
print(" .''.: /\ : _\(/_ ':'* /\ * : '..'. -=:o:=-")
print(" :_\/_:'.:::. /)\*''* .|.* '.\'/.'_\(/_'.':'.'")
print(" : /\ : ::::: '*_\/_* | | -= o =- /)\ ' *")
print(" '..' ':::' * /\ * |'| .'/.\'. '._____")
print(" * __*..* | | : |. |' .---\"|")
print(" _* .-' '-. | | .--'| || | _| |")
print(" .-'| _.| | || '-__ | | | || |")
print(" |' | |. | || | | | | || |")
print(" ____| '-' ' "" '-' '-.' '` |____")
print(" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ")
print("\n You Saved \"The Girl\"!")
print("\n You whisk her unto your arms and dissaper in to the sunset!")
print(" Lets hope she makes it worth your while ;)")
print("\n\n Thanks for playing the game!")
class EnemyTile(MapTile):
def __init__(self, x, y):
encounter_type = random.random()
if encounter_type < 0.30:
self.enemy = enemies.GiantSpider()
self.alive_text = "\nA giant spider jumps down from " \
"its web and lands right in front " \
"of you!"
self.dead_text = "\nThe lifeless corpse of the spider " \
"slumps in the corner. Creepy."
elif encounter_type < 0.60:
self.enemy = enemies.Goblin()
self.alive_text = "\nA nasty lttile goblin leaps out at you" \
"and waves his stabby daggar at you!"
self.dead_text = "\nThe gblin exploded all over the walls." \
"I'm not cleaning that up.'"
elif encounter_type < 0.80:
self.enemy = enemies.Ogre()
self.alive_text = "\nA ogre blocks your path!"
self.dead_text = "\nThe oger died convinietly off of " \
"the path, out of the way."
elif encounter_type < 0.95:
self.enemy = enemies.BatColony()
self.alive_text = "\nBats. Eeshk..."
self.dead_text = "\nThe furry bastards are dead"
else:
self.enemy = enemies.RockMonster()
self.alive_text = "\nIs it a bird? Is it a plane? no " \
"it's a rock monster!"
self.dead_text = "\nYou killed a rock. " \
"Now thats dedication!!"
self.enemy.hp = self.enemy.randomise_stats(self.enemy.hp)
self.enemy.damage = self.enemy.randomise_stats(self.enemy.damage)
self.enemy.loot = self.enemy.randomise_stats(self.enemy.loot)
super().__init__(x, y)
def intro_text(self):
if self.enemy.is_alive():
print("\n ___________.___ ________ ___ ___ ___________._.")
print(" \\_ _____/| | / _____/ / | \\ \\__ ___/| |")
print(" | __) | |/ \\ ___ / ~ \\ | | | |")
print(" | \\ | |\\ \\_\\ \\\\ Y / | | \\|")
print(" \\___ / |___| \\______ / \\___|_ / |____| __")
print(" \\/ \\/ \\/ \\/")
print(self.alive_text)
print("{} has {} HP".format(self.enemy.name, self.enemy.hp))
else:
print(" ____ ____.___ _________ ___________________ __________ _____.___.._.")
print(" \\ \\ / /| |\\_ ___ \\ \\__ ___/\\_____ \\ \\______ \\\\__ | || |")
print(" \\ Y / | |/ \\ \\/ | | / | \\ | _/ / | || |")
print(" \\ / | |\\ \\____ | | / | \\ | | \\ \\____ | \\|")
print(" \\___/ |___| \\______ / |____| \\_______ / |____|_ / / ______| __")
print(" \\/ \\/ \\/ \\/ \\/")
print(self.dead_text)
def modify_player(self, player):
if self.enemy.is_alive():
dex_mod = decimal.Decimal(player.dex_stat / 100)
dodge_chance = decimal.Decimal(random.random()) * dex_mod
miss_chance = decimal.Decimal(random.random()) * dex_mod
if miss_chance > 0.98:
print("The {} missed!".format(self.enemy.name))
elif dodge_chance > 0.98:
print("You dodged the attack!")
else:
def_mod = decimal.Decimal(2 - (player.def_stat / 100))
enemy_damage = round(self.enemy.damage * def_mod, 0)
player.curr_hp -= enemy_damage
print("The {} does {} damage. You have {} HP remaining."
.format(self.enemy.name, enemy_damage, player.curr_hp))
class TraderTile(MapTile):
def __init__(self, x, y):
self.trader = npc.Trader()
super().__init__(x, y)
def trade(self, buyer, seller):
if buyer.name == "Trader":
action = "sell"
buyer_char = "Trader"
else:
action = "buy"
buyer_char = "Player"
seller_inventory = []
item_choice = None
for item_cat, item_attr in seller.inventory['Items'].items():
if item_attr["qty"] > 0:
seller_inventory.append(item_attr["obj"])
print("\nTrading Items")
print("----------------\n")
if not seller_inventory:
print("There are no items to sell!")
else:
for i, item in enumerate(seller_inventory, 1):
print(" {}: {}".format(i, item.name))
print("\nq: Cancel trade")
while item_choice not in seller_inventory:
item_choice = input("\nWhich item do you want to {}? ".format(action))
if item_choice in ['Q', 'q']:
if buyer_char == "Player":
buyer.room.visited = 0
else:
seller.room.visited = 0
return
else:
try:
to_swap = seller_inventory[int(item_choice) - 1]
self.swap(seller, buyer, to_swap)
except (ValueError, IndexError):
print("Invalid choice!")
def swap(self, seller, buyer, item):
if buyer.name == "Trader":
action = "sell"
buyer_char = "Trader"
else:
action = "buy"
buyer_char = "Player"
if item.value > buyer.gold:
print("That's too expensive!")
self.trade(buyer, seller)
for item_cat, item_attr in seller.inventory['Items'].items():
if item_cat == item.name:
item_attr["qty"] -= 1
if item.name in buyer.inventory['Items'].items():
for item_cat, item_attr in buyer.inventory['Items'].items():
if item_cat == item.name:
item_attr["qty"] += 1
else:
buyer.inventory['Items'][item.name] = {}
buyer.inventory['Items'][item.name]['obj'] = item
buyer.inventory['Items'][item.name]['qty'] = 1
seller.gold = seller.gold + item.value
buyer.gold = buyer.gold - item.value
print("Trade complete!")
def check_if_trade(self, player):
while True:
if len(self.trader.inventory) == 0:
print("No items to trade!")
player.room.visited = 0
return
user_input = input("Would you like to (B)uy, (S)ell, or (Q)uit?: ")
if user_input in ['Q', 'q']:
player.room.visited = 0
return
elif user_input in ['B', 'b']:
|
elif user_input in ['S', 's']:
print("Here's whats available to sell:\n")
self.trade(buyer=self.trader, seller=player)
else:
print("Invalid choice!")
def intro_text(self):
print("\n _________##")
print(" @\\\\\\\\\\\\\\\\\\##")
print(" @@@\\\\\\\\\\\\\\\\##\\")
print(" @@ @@\\\\\\\\\\\\\\\\\\\\\\")
print(" @@@@@@@\\\\\\\\\\\\\\\\\\\\\\")
print(" @@@@@@@@@----------|")
print(" @@ @@@ @@__________|")
print(" @@@@@@@@@__________|")
print(" @@@@ .@@@__________|")
print(" _\|/__@@@@__@@@__________|__")
print("\n Trading Post")
print("\n Press \"T\" to trade")
class FindGoldTile(MapTile):
def __init__(self, x, y):
self.gold = random.randint(20, 75)
self.gold_claimed = False
super().__init__(x, y)
def modify_player(self, player):
if not self.gold_claimed:
self.gold_claimed = True
luc_mod = decimal.Decimal(player.luc_stat / 100)
found_loot = round(self.gold * luc_mod, 0)
player.gold += found_loot
print("\n You found {} gold coins!".format(found_loot))
def intro_text(self):
print("\n |#######=====================#######|")
print(" |#(1)*UNITED STATES OF WHAYEVER*(1)#|")
print(" |#** /===\ ******** **#|")
print(" |*# {G} | (\") | #*|")
print(" |#* ****** | /v\ | O N E *#|")
print(" |#(1) \===/ (1)#|")
print(" |##===========SOME GOLD===========##|")
if self.gold_claimed:
print("\n You've already looted this place!")
else:
print("\n Someone dropped some gold. You pick it up.")
start_tile_location = None
tile_type_dict = {"VT": VictoryTile,
"EN": EnemyTile,
"ST": StartTile,
"NA": BoringTile,
"FG": FindGoldTile,
"TT": TraderTile,
" ": None}
def is_dsl_valid(dsl):
if dsl.count("|ST|") != 1:
return False
if dsl.count("|VT|") == 0:
return False
lines = dsl.splitlines()
lines = [l for l in lines if l]
pipe_counts = [line.count("|") for line in lines]
for count in pipe_counts:
if count != pipe_counts[0]:
return False
return True
def parse_world_dsl(map_file):
world_map = []
level_map = open(map_file, 'r').read()
if not is_dsl_valid(level_map):
sys.exit("Rumtime error: unable to parse map file")
dsl_lines = level_map.splitlines()
dsl_lines = [x for x in dsl_lines if x]
for y, dsl_row in enumerate(dsl_lines):
row = []
dsl_cells = dsl_row.split("|")
dsl_cells = [c for c in dsl_cells if c]
for x, dsl_cell in enumerate(dsl_cells):
if dsl_cell not in tile_type_dict:
sys.exit("Map parse error: Invalid room type in map")
break
tile_type = tile_type_dict[dsl_cell]
tile = None
if tile_type is not None:
tile = tile_type(x, y)
if tile_type == StartTile:
global start_tile_location
start_tile_location = x, y
row.append(tile)
world_map.append(row)
return world_map
| print("Here's whats available to buy:\n")
self.trade(buyer=player, seller=self.trader) | conditional_block |
types.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use serde::{
de::{Deserializer, Error},
Deserialize, Serialize,
};
use serde_repr::{Deserialize_repr, Serialize_repr};
use uuid::Uuid;
use crate::payment_command::Actor;
/// A header set with a unique UUID (according to RFC4122 with "-"'s included) for the request,
/// used for tracking requests and debugging. Responses must have the same string in the
/// X-REQUEST-ID header value as the requests they correspond to.
pub const REQUEST_ID_HEADER: &str = "X-REQUEST-ID";
/// A header with the HTTP request sender's VASP DIP-5 address used in the command object. The HTTP
/// request sender must use the compliance key of the VASP account linked with this address to sign
/// the request JWS body, and the request receiver uses this address to find the request sender's
/// compliance key to verify the JWS signature. For example: VASP A transfers funds to VASP B. The
/// HTTP request A sends to B contains X-REQUEST-SENDER-ADDRESS as VASP A's address. An HTTP
/// request B sends to A should contain VASP B's address as X-REQUEST-SENDER-ADDRESS.
pub const REQUEST_SENDER_ADDRESS: &str = "X-REQUEST-SENDER-ADDRESS";
#[derive(Debug, Copy, Clone, PartialEq, Eq, Deserialize, Serialize)]
enum ObjectType {
CommandRequestObject,
CommandResponseObject,
PaymentCommand,
}
impl ObjectType {
fn deserialize_request<'de, D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> {
Self::deserialize_variant(d, Self::CommandRequestObject)
}
fn deserialize_response<'de, D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> {
Self::deserialize_variant(d, Self::CommandResponseObject)
}
fn deserialize_payment<'de, D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> {
Self::deserialize_variant(d, Self::PaymentCommand)
}
fn deserialize_variant<'de, D: Deserializer<'de>>(
d: D,
variant: Self,
) -> Result<Self, D::Error> {
let object_type = Self::deserialize(d)?;
if object_type == variant {
Ok(object_type)
} else {
Err(D::Error::custom(format_args!("expected {:?}", variant)))
}
}
}
#[derive(Deserialize, Serialize)]
pub struct CommandRequestObject {
#[serde(deserialize_with = "ObjectType::deserialize_request")]
#[serde(rename = "_ObjectType")]
object_type: ObjectType,
#[serde(flatten)]
command: Command,
cid: Uuid,
}
impl CommandRequestObject {
pub fn new(command: Command, cid: Uuid) -> Self {
Self {
object_type: ObjectType::CommandRequestObject,
command,
cid,
}
}
pub fn command(&self) -> &Command {
&self.command
}
pub fn cid(&self) -> Uuid {
self.cid
}
pub fn into_parts(self) -> (Command, Uuid) {
(self.command, self.cid)
}
}
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum CommandStatus {
Success,
Failure,
}
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
pub struct CommandResponseObject {
#[serde(deserialize_with = "ObjectType::deserialize_response")]
#[serde(rename = "_ObjectType")]
object_type: ObjectType,
status: CommandStatus,
#[serde(skip_serializing_if = "Option::is_none")]
error: Option<OffChainError>,
#[serde(skip_serializing_if = "Option::is_none")]
cid: Option<Uuid>,
}
impl CommandResponseObject {
pub fn new(status: CommandStatus) -> Self {
Self {
object_type: ObjectType::CommandResponseObject,
status,
error: None,
cid: None,
}
}
}
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
pub enum OffChainErrorType {
#[serde(rename = "command_error")]
Command,
#[serde(rename = "protocol_error")]
Protocol,
}
// https://dip.diem.com/dip-1/#list-of-error-codes
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum ErrorCode {
//
// HTTP Header Validation Error Codes
//
/// One of the following potential errors:
/// * `X-REQUEST-SENDER-ADDRESS` header value is not the request sender’s address in the
/// command object. All command objects should have a field that is the request sender’s
/// address.
/// * Could not find Diem's onchain account by the `X-REQUEST-SENDER-ADDRESS` header value.
/// * Could not find the compliance key of the onchain account found by the
/// `X-REQUEST-SENDER-ADDRESS` header value.
/// * The compliance key found from the onchain account by `X-REQUEST-SENDER-ADDRESS` is not a
/// valid ED25519 public key.
/// * `X-REQUEST-ID` is not a valid UUID format.
InvalidHttpHeader,
/// Missing HTTP header `X-REQUEST-ID` or `X-REQUEST-SENDER-ADDRESS`.
MissingHttpHeader,
//
// JWS Validation Error Codes#
//
/// Invalid JWS format (compact) or protected header
InvalidJws,
/// JWS signature verification failed
InvalidJwsSignature,
//
// Request Object Validation Error Codes#
//
/// Request content is not valid Json
InvalidJson,
/// Object is not valid, type does not match
/// The Command request/response object json is not an object, or the command object type does
/// not match command_type.
InvalidObject,
/// Either:
/// * Missing required field
/// * An optional field is required to be set for a specific state, e.g. PaymentObject requires
/// sender's kyc_data (which is an optional field for PaymentActorObject) when sender init
/// the PaymentObject.
MissingField,
/// A field is unknown for an object.
UnknownField,
/// Invalid/unsupported command_type.
UnknownCommandType,
/// * Invalid / unknown enum field values.
/// * UUID field value does not match UUID format.
/// * Payment actor address is not a valid DIP-5 account identifier.
/// * Currency field value is not a valid Diem currency code for the connected network.
InvalidFieldValue,
/// The HTTP request sender is not the right actor to send the payment object. For example, if
/// the actor receiver sends a new command with payment object change that should be done by
/// actor sender.
InvalidCommandProducer,
/// could not find command by reference_id for a non-initial state command object; for example,
/// actor receiver received a payment command object that actor sender status is
/// `ready_for_settlement`, but receiver could not find any command object by the reference id.
InvalidInitialOrPriorNotFound,
/// PaymentActionObject#amount is under travel rule threshold, no kyc needed for the
/// transaction
NoKycNeeded,
/// Either:
/// * Field recipient_signature value is not hex-encoded bytes.
/// * Field recipient_signature value is an invalid signature.
InvalidRecipientSignature,
/// * The DIP-5 account identifier address in the command object is not HTTP request sender’s
/// address or receiver’s address. For payment object it is sender.address or
/// receiver.address.
/// * Could not find on-chain account by an DIP-5 account identifier address in command object
/// address.
UnknownAddress,
/// * Command object is in conflict with another different command object by cid, likely a cid
/// is reused for different command object.
/// * Failed to acquire lock for the command object by the reference_id.
Conflict,
/// Field payment.action.currency value is a valid Diem currency code, but it is not supported
/// or acceptable by the receiver VASP.
UnsupportedCurrency,
/// * Could not find data by the original_payment_reference_id if the sender set it.
/// * The status of the original payment object found by original_payment_reference_id is
/// aborted instead of ready_for_settlement.
InvalidOriginalPaymentReferenceId,
/// Overwrite a write-once/immutable field value
/// * Overwrite a field that can only be written once.
/// * Overwrite an immutable field (field can only be set in initial command object), e.g.
/// `original_payment_reference_id`).
/// * Overwrite opponent payment actor's fields.
InvalidOverwrite,
/// As we only allow one actor action at a time, and the next states for a given command object
/// state are limited to specific states. This error indicates the new payment object state is
/// not valid according to the current object state. For example: VASP A sends RSOFT to VASP B,
/// VASP B should send the next payment object with ABORT, or SSOFTSEND; VASP A should respond
/// to this error code if VASP B sends payment object state SSOFT.
InvalidTransition,
#[serde(other)]
/// Unknown Error Code
Unknown,
}
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
pub struct OffChainError {
#[serde(rename = "type")]
error_type: OffChainErrorType,
#[serde(skip_serializing_if = "Option::is_none")]
field: Option<String>,
code: ErrorCode,
#[serde(skip_serializing_if = "Option::is_none")]
message: Option<String>,
}
#[derive(Deserialize, Serialize)]
#[serde(tag = "command_type", content = "command")]
pub enum Command {
PaymentCommand(PaymentCommandObject),
FundPullPreApprovalCommand,
}
#[derive(Deserialize, Serialize)]
pub struct PaymentCommandObject {
#[serde(deserialize_with = "ObjectType::deserialize_payment")]
#[serde(rename = "_ObjectType")]
object_type: ObjectType,
payment: PaymentObject,
}
impl PaymentCommandObject {
pub fn new(paym | : PaymentObject) -> Self {
Self {
object_type: ObjectType::PaymentCommand,
payment,
}
}
pub fn payment(&self) -> &PaymentObject {
&self.payment
}
pub fn into_payment(self) -> PaymentObject {
self.payment
}
}
/// A `PaymentActorObject` represents a participant in a payment - either sender or receiver. It
/// also includes the status of the actor, indicates missing information or willingness to settle
/// or abort the payment, and the Know-Your-Customer information of the customer involved in the
/// payment.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct PaymentActorObject {
/// Address of the sender/receiver account. Addresses may be single use or valid for a limited
/// time, and therefore VASPs should not rely on them remaining stable across time or different
/// VASP addresses. The addresses are encoded using bech32. The bech32 address encodes both the
/// address of the VASP as well as the specific user's subaddress. They should be no longer
/// than 80 characters. Mandatory and immutable. For Diem addresses, refer to the "account
/// identifier" section in DIP-5 for format.
pub address: Box<str>,
/// The KYC data for this account. This field is optional but immutable once it is set.
pub kyc_data: Option<KycDataObject>,
/// Status of the payment from the perspective of this actor. This field can only be set by the
/// respective sender/receiver VASP and represents the status on the sender/receiver VASP side.
/// This field is mandatory by this respective actor (either sender or receiver side) and
/// mutable. Note that in the first request (which is initiated by the sender), the receiver
/// status should be set to `None`.
pub status: StatusObject,
/// Can be specified by the respective VASP to hold metadata that the sender/receiver VASP
/// wishes to associate with this payment. It may be set to an empty list (i.e. `[]`). New
/// `metadata` elements may be appended to the `metadata` list via subsequent commands on an
/// object.
#[serde(skip_serializing_if = "Vec::is_empty", default)]
pub metadata: Vec<String>,
/// Freeform KYC data. If a soft-match occurs, this field can be used to specify additional KYC
/// data which can be used to clear the soft-match. It is suggested that this data be JSON,
/// XML, or another human-readable form.
pub additional_kyc_data: Option<String>,
}
impl PaymentActorObject {
pub fn status(&self) -> &StatusObject {
&self.status
}
pub fn kyc_data(&self) -> Option<&KycDataObject> {
self.kyc_data.as_ref()
}
pub fn additional_kyc_data(&self) -> Option<&str> {
self.additional_kyc_data.as_deref()
}
pub fn validate_write_once_fields(&self, prior: &Self) -> Result<(), WriteOnceError> {
if self.address != prior.address {
return Err(WriteOnceError);
}
if prior.kyc_data.is_some() && prior.kyc_data != self.kyc_data {
return Err(WriteOnceError);
}
if !self.metadata.starts_with(&prior.metadata) {
return Err(WriteOnceError);
}
Ok(())
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum ActionType {
Charge,
}
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct PaymentActionObject {
/// Amount of the transfer. Base units are the same as for on-chain transactions for this
/// currency. For example, if DiemUSD is represented on-chain where “1” equals 1e-6 dollars,
/// then “1” equals the same amount here. For any currency, the on-chain mapping must be used
/// for amounts.
pub amount: u64,
/// One of the supported on-chain currency types - ex. XUS, etc.
// TODO Should be an enum per https://dip.diem.com/dip-1/#paymentactionobject
pub currency: String,
/// Populated in the request. This value indicates the requested action to perform, and the
/// only valid value is charge.
pub action: ActionType,
/// [Unix time](https://en.wikipedia.org/wiki/Unix_time) indicating the time that the payment
/// Command was created.
pub timestamp: u64,
}
/// Some fields are immutable after they are defined once. Others can be updated multiple times
/// (see below). Updating immutable fields with a different value results in a Command error.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct PaymentObject {
/// Information about the sender in this payment
pub sender: PaymentActorObject,
/// Information about the receiver in this payment
pub receiver: PaymentActorObject,
/// Unique reference ID of this payment on the payment initiator VASP (the VASP which
/// originally created this payment Object). This value should be globally unique. This field
/// is mandatory on payment creation and immutable after that. We recommend using a 128 bits
/// long UUID according to RFC4122 with "-"'s included.
pub reference_id: Uuid,
/// Used to refer an old payment known to the other VASP. For example, used for refunds. The
/// reference ID of the original payment will be placed into this field. This field is
/// mandatory on refund and immutable
pub originial_payment_reference_id: Option<Uuid>,
/// Signature of the recipient of this transaction encoded in hex. The is signed with the
/// compliance key of the recipient VASP and is used for on-chain attestation from the
/// recipient party. This may be omitted on blockchains which do not require on-chain
/// attestation.
pub recipient_signature: Option<String>,
/// Number of cryptocurrency + currency type (XUS, etc.)1 + type of action to take. This field is mandatory and immutable
pub action: PaymentActionObject,
/// Description of the payment. To be displayed to the user. Unicode utf-8 encoded max length
/// of 255 characters. This field is optional but can only be written once.
pub description: Option<String>,
}
impl PaymentObject {
pub fn sender(&self) -> &PaymentActorObject {
&self.sender
}
pub fn receiver(&self) -> &PaymentActorObject {
&self.receiver
}
pub fn reference_id(&self) -> Uuid {
self.reference_id
}
pub fn actor_object_by_actor(&self, actor: Actor) -> &PaymentActorObject {
match actor {
Actor::Sender => self.sender(),
Actor::Receiver => self.receiver(),
}
}
pub fn recipient_signature(&self) -> Option<&str> {
self.recipient_signature.as_deref()
}
pub fn validate_write_once_fields(&self, prior: &Self) -> Result<(), WriteOnceError> {
self.sender.validate_write_once_fields(&prior.sender)?;
self.receiver.validate_write_once_fields(&prior.receiver)?;
if self.reference_id != prior.reference_id {
return Err(WriteOnceError);
}
if self.originial_payment_reference_id != prior.originial_payment_reference_id {
return Err(WriteOnceError);
}
if self.action != prior.action {
return Err(WriteOnceError);
}
if prior.description.is_some() && prior.description != self.description {
return Err(WriteOnceError);
}
Ok(())
}
}
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct StatusObject {
/// Status of the payment from the perspective of this actor. This field can only be set by the
/// respective sender/receiver VASP and represents the status on the sender/receiver VASP side.
/// This field is mandatory by this respective actor (either sender or receiver side) and
/// mutable.
pub status: Status,
/// In the case of an `abort` status, this field may be used to describe the reason for the
/// abort. Represents the error code of the corresponding error.
pub abort_code: Option<AbortCode>,
/// Additional details about this error. To be used only when `abort_code` is populated.
pub abort_message: Option<String>,
}
impl StatusObject {
pub fn status(&self) -> Status {
self.status
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum Status {
/// No status is yet set from this actor.
None,
/// KYC data about the subaddresses is required by this actor.
NeedsKycData,
/// Transaction is ready for settlement according to this actor (i.e. the requried
/// signatures/KYC data has been provided.
ReadyForSettlement,
/// Indicates the actor wishes to abort this payment, instaed of settling it.
Abort,
/// Actor's KYC data resulted in a soft-match, request additional KYC data.
SoftMatch,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum AbortCode {
/// The payment is rejected. It should not be used in the `original_payment_reference_id` field
/// of a new payment
Rejected,
}
/// Represents a national ID.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct NationalIdObject {
/// Indicates the national ID value - for example, a social security number
pub id_value: String,
/// Two-letter country code (https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2)
pub country: Option<String>,
/// Indicates the type of the ID
#[serde(rename = "type")]
pub id_type: Option<String>,
}
/// Represents a physical address
#[derive(Clone, Debug, Default, PartialEq, Deserialize, Serialize)]
pub struct AddressObject {
/// The city, district, suburb, town, or village
pub city: Option<String>,
/// Two-letter country code (https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2)
pub country: Option<String>,
/// Address line 1
pub line1: Option<String>,
/// Address line 2 - apartment, unit, etc.
pub line2: Option<String>,
/// ZIP or postal code
pub postal_code: Option<String>,
/// State, county, province, region.
pub state: Option<String>,
}
/// A `KycDataObject` represents the required information for a single subaddress. Proof of
/// non-repudiation is provided by the signatures included in the JWS payloads. The only mandatory
/// fields are `payload_version` and `type`. All other fields are optional from the point of view of
/// the protocol -- however they may need to be included for another VASP to be ready to settle the
/// payment.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct KycDataObject {
/// Version identifier to allow modifications to KYC data Object without needing to bump
/// version of entire API set. Set to 1
payload_version: KycDataObjectVersion,
pub kyc_data_type: KycDataObjectType,
/// Legal given name of the user for which this KYC data Object applies.
pub given_name: Option<String>,
/// Legal surname of the user for which this KYC data Object applies.
pub surname: Option<String>,
/// Physical address data for this account
pub address: Option<AddressObject>,
/// Date of birth for the holder of this account. Specified as an ISO 8601 calendar date
/// format: https://en.wikipedia.org/wiki/ISO_8601
pub dob: Option<String>,
/// Place of birth for this user. line1 and line2 fields should not be populated for this usage
/// of the address Object
pub place_of_birth: Option<String>,
/// National ID information for the holder of this account
pub national_id: Option<NationalIdObject>,
/// Name of the legal entity. Used when subaddress represents a legal entity rather than an
/// individual. KYCDataObject should only include one of legal_entity_name OR
/// given_name/surname
pub legal_entity_name: Option<String>,
}
impl KycDataObject {
pub fn new_entity() -> Self {
Self {
payload_version: KycDataObjectVersion::V1,
kyc_data_type: KycDataObjectType::Entity,
given_name: None,
surname: None,
address: None,
dob: None,
place_of_birth: None,
national_id: None,
legal_entity_name: None,
}
}
pub fn new_individual() -> Self {
Self {
payload_version: KycDataObjectVersion::V1,
kyc_data_type: KycDataObjectType::Individual,
given_name: None,
surname: None,
address: None,
dob: None,
place_of_birth: None,
national_id: None,
legal_entity_name: None,
}
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum KycDataObjectType {
Individual,
Entity,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize_repr, Serialize_repr)]
#[repr(u8)]
pub enum KycDataObjectVersion {
V1 = 1,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct WriteOnceError;
#[cfg(test)]
mod tests {
use super::{KycDataObjectType, KycDataObjectVersion};
use serde_json::json;
#[test]
fn kyc_data_object_type() {
use KycDataObjectType::*;
let variants = [(Individual, "individual"), (Entity, "entity")];
for (variant, s) in &variants {
let json = json! { s };
assert_eq!(serde_json::to_value(variant).unwrap(), json);
assert_eq!(
serde_json::from_value::<KycDataObjectType>(json).unwrap(),
*variant
);
}
let invalid = json! { "Organization" };
serde_json::from_value::<KycDataObjectType>(invalid).unwrap_err();
}
#[test]
fn kyc_data_object_version() {
let v1_json = json! { 1 };
let v1: KycDataObjectVersion = serde_json::from_value(v1_json.clone()).unwrap();
assert_eq!(serde_json::to_value(&v1).unwrap(), v1_json);
let invalid_version = json! { 52 };
serde_json::from_value::<KycDataObjectVersion>(invalid_version).unwrap_err();
let invalid_type = json! { "1" };
serde_json::from_value::<KycDataObjectVersion>(invalid_type).unwrap_err();
}
}
| ent | identifier_name |
types.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use serde::{
de::{Deserializer, Error},
Deserialize, Serialize,
};
use serde_repr::{Deserialize_repr, Serialize_repr};
use uuid::Uuid;
use crate::payment_command::Actor;
/// A header set with a unique UUID (according to RFC4122 with "-"'s included) for the request,
/// used for tracking requests and debugging. Responses must have the same string in the
/// X-REQUEST-ID header value as the requests they correspond to.
pub const REQUEST_ID_HEADER: &str = "X-REQUEST-ID";
/// A header with the HTTP request sender's VASP DIP-5 address used in the command object. The HTTP
/// request sender must use the compliance key of the VASP account linked with this address to sign
/// the request JWS body, and the request receiver uses this address to find the request sender's
/// compliance key to verify the JWS signature. For example: VASP A transfers funds to VASP B. The
/// HTTP request A sends to B contains X-REQUEST-SENDER-ADDRESS as VASP A's address. An HTTP
/// request B sends to A should contain VASP B's address as X-REQUEST-SENDER-ADDRESS.
pub const REQUEST_SENDER_ADDRESS: &str = "X-REQUEST-SENDER-ADDRESS";
#[derive(Debug, Copy, Clone, PartialEq, Eq, Deserialize, Serialize)]
enum ObjectType {
CommandRequestObject,
CommandResponseObject,
PaymentCommand,
}
impl ObjectType {
fn deserialize_request<'de, D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> {
Self::deserialize_variant(d, Self::CommandRequestObject)
}
fn deserialize_response<'de, D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> {
Self::deserialize_variant(d, Self::CommandResponseObject)
}
fn deserialize_payment<'de, D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> {
Self::deserialize_variant(d, Self::PaymentCommand)
}
fn deserialize_variant<'de, D: Deserializer<'de>>(
d: D,
variant: Self,
) -> Result<Self, D::Error> {
let object_type = Self::deserialize(d)?;
if object_type == variant {
Ok(object_type)
} else |
}
}
#[derive(Deserialize, Serialize)]
pub struct CommandRequestObject {
#[serde(deserialize_with = "ObjectType::deserialize_request")]
#[serde(rename = "_ObjectType")]
object_type: ObjectType,
#[serde(flatten)]
command: Command,
cid: Uuid,
}
impl CommandRequestObject {
pub fn new(command: Command, cid: Uuid) -> Self {
Self {
object_type: ObjectType::CommandRequestObject,
command,
cid,
}
}
pub fn command(&self) -> &Command {
&self.command
}
pub fn cid(&self) -> Uuid {
self.cid
}
pub fn into_parts(self) -> (Command, Uuid) {
(self.command, self.cid)
}
}
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum CommandStatus {
Success,
Failure,
}
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
pub struct CommandResponseObject {
#[serde(deserialize_with = "ObjectType::deserialize_response")]
#[serde(rename = "_ObjectType")]
object_type: ObjectType,
status: CommandStatus,
#[serde(skip_serializing_if = "Option::is_none")]
error: Option<OffChainError>,
#[serde(skip_serializing_if = "Option::is_none")]
cid: Option<Uuid>,
}
impl CommandResponseObject {
pub fn new(status: CommandStatus) -> Self {
Self {
object_type: ObjectType::CommandResponseObject,
status,
error: None,
cid: None,
}
}
}
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
pub enum OffChainErrorType {
#[serde(rename = "command_error")]
Command,
#[serde(rename = "protocol_error")]
Protocol,
}
// https://dip.diem.com/dip-1/#list-of-error-codes
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum ErrorCode {
//
// HTTP Header Validation Error Codes
//
/// One of the following potential errors:
/// * `X-REQUEST-SENDER-ADDRESS` header value is not the request sender’s address in the
/// command object. All command objects should have a field that is the request sender’s
/// address.
/// * Could not find Diem's onchain account by the `X-REQUEST-SENDER-ADDRESS` header value.
/// * Could not find the compliance key of the onchain account found by the
/// `X-REQUEST-SENDER-ADDRESS` header value.
/// * The compliance key found from the onchain account by `X-REQUEST-SENDER-ADDRESS` is not a
/// valid ED25519 public key.
/// * `X-REQUEST-ID` is not a valid UUID format.
InvalidHttpHeader,
/// Missing HTTP header `X-REQUEST-ID` or `X-REQUEST-SENDER-ADDRESS`.
MissingHttpHeader,
//
// JWS Validation Error Codes#
//
/// Invalid JWS format (compact) or protected header
InvalidJws,
/// JWS signature verification failed
InvalidJwsSignature,
//
// Request Object Validation Error Codes#
//
/// Request content is not valid Json
InvalidJson,
/// Object is not valid, type does not match
/// The Command request/response object json is not an object, or the command object type does
/// not match command_type.
InvalidObject,
/// Either:
/// * Missing required field
/// * An optional field is required to be set for a specific state, e.g. PaymentObject requires
/// sender's kyc_data (which is an optional field for PaymentActorObject) when sender init
/// the PaymentObject.
MissingField,
/// A field is unknown for an object.
UnknownField,
/// Invalid/unsupported command_type.
UnknownCommandType,
/// * Invalid / unknown enum field values.
/// * UUID field value does not match UUID format.
/// * Payment actor address is not a valid DIP-5 account identifier.
/// * Currency field value is not a valid Diem currency code for the connected network.
InvalidFieldValue,
/// The HTTP request sender is not the right actor to send the payment object. For example, if
/// the actor receiver sends a new command with payment object change that should be done by
/// actor sender.
InvalidCommandProducer,
/// could not find command by reference_id for a non-initial state command object; for example,
/// actor receiver received a payment command object that actor sender status is
/// `ready_for_settlement`, but receiver could not find any command object by the reference id.
InvalidInitialOrPriorNotFound,
/// PaymentActionObject#amount is under travel rule threshold, no kyc needed for the
/// transaction
NoKycNeeded,
/// Either:
/// * Field recipient_signature value is not hex-encoded bytes.
/// * Field recipient_signature value is an invalid signature.
InvalidRecipientSignature,
/// * The DIP-5 account identifier address in the command object is not HTTP request sender’s
/// address or receiver’s address. For payment object it is sender.address or
/// receiver.address.
/// * Could not find on-chain account by an DIP-5 account identifier address in command object
/// address.
UnknownAddress,
/// * Command object is in conflict with another different command object by cid, likely a cid
/// is reused for different command object.
/// * Failed to acquire lock for the command object by the reference_id.
Conflict,
/// Field payment.action.currency value is a valid Diem currency code, but it is not supported
/// or acceptable by the receiver VASP.
UnsupportedCurrency,
/// * Could not find data by the original_payment_reference_id if the sender set it.
/// * The status of the original payment object found by original_payment_reference_id is
/// aborted instead of ready_for_settlement.
InvalidOriginalPaymentReferenceId,
/// Overwrite a write-once/immutable field value
/// * Overwrite a field that can only be written once.
/// * Overwrite an immutable field (field can only be set in initial command object), e.g.
/// `original_payment_reference_id`).
/// * Overwrite opponent payment actor's fields.
InvalidOverwrite,
/// As we only allow one actor action at a time, and the next states for a given command object
/// state are limited to specific states. This error indicates the new payment object state is
/// not valid according to the current object state. For example: VASP A sends RSOFT to VASP B,
/// VASP B should send the next payment object with ABORT, or SSOFTSEND; VASP A should respond
/// to this error code if VASP B sends payment object state SSOFT.
InvalidTransition,
#[serde(other)]
/// Unknown Error Code
Unknown,
}
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
pub struct OffChainError {
#[serde(rename = "type")]
error_type: OffChainErrorType,
#[serde(skip_serializing_if = "Option::is_none")]
field: Option<String>,
code: ErrorCode,
#[serde(skip_serializing_if = "Option::is_none")]
message: Option<String>,
}
#[derive(Deserialize, Serialize)]
#[serde(tag = "command_type", content = "command")]
pub enum Command {
PaymentCommand(PaymentCommandObject),
FundPullPreApprovalCommand,
}
#[derive(Deserialize, Serialize)]
pub struct PaymentCommandObject {
#[serde(deserialize_with = "ObjectType::deserialize_payment")]
#[serde(rename = "_ObjectType")]
object_type: ObjectType,
payment: PaymentObject,
}
impl PaymentCommandObject {
pub fn new(payment: PaymentObject) -> Self {
Self {
object_type: ObjectType::PaymentCommand,
payment,
}
}
pub fn payment(&self) -> &PaymentObject {
&self.payment
}
pub fn into_payment(self) -> PaymentObject {
self.payment
}
}
/// A `PaymentActorObject` represents a participant in a payment - either sender or receiver. It
/// also includes the status of the actor, indicates missing information or willingness to settle
/// or abort the payment, and the Know-Your-Customer information of the customer involved in the
/// payment.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct PaymentActorObject {
/// Address of the sender/receiver account. Addresses may be single use or valid for a limited
/// time, and therefore VASPs should not rely on them remaining stable across time or different
/// VASP addresses. The addresses are encoded using bech32. The bech32 address encodes both the
/// address of the VASP as well as the specific user's subaddress. They should be no longer
/// than 80 characters. Mandatory and immutable. For Diem addresses, refer to the "account
/// identifier" section in DIP-5 for format.
pub address: Box<str>,
/// The KYC data for this account. This field is optional but immutable once it is set.
pub kyc_data: Option<KycDataObject>,
/// Status of the payment from the perspective of this actor. This field can only be set by the
/// respective sender/receiver VASP and represents the status on the sender/receiver VASP side.
/// This field is mandatory by this respective actor (either sender or receiver side) and
/// mutable. Note that in the first request (which is initiated by the sender), the receiver
/// status should be set to `None`.
pub status: StatusObject,
/// Can be specified by the respective VASP to hold metadata that the sender/receiver VASP
/// wishes to associate with this payment. It may be set to an empty list (i.e. `[]`). New
/// `metadata` elements may be appended to the `metadata` list via subsequent commands on an
/// object.
#[serde(skip_serializing_if = "Vec::is_empty", default)]
pub metadata: Vec<String>,
/// Freeform KYC data. If a soft-match occurs, this field can be used to specify additional KYC
/// data which can be used to clear the soft-match. It is suggested that this data be JSON,
/// XML, or another human-readable form.
pub additional_kyc_data: Option<String>,
}
impl PaymentActorObject {
pub fn status(&self) -> &StatusObject {
&self.status
}
pub fn kyc_data(&self) -> Option<&KycDataObject> {
self.kyc_data.as_ref()
}
pub fn additional_kyc_data(&self) -> Option<&str> {
self.additional_kyc_data.as_deref()
}
pub fn validate_write_once_fields(&self, prior: &Self) -> Result<(), WriteOnceError> {
if self.address != prior.address {
return Err(WriteOnceError);
}
if prior.kyc_data.is_some() && prior.kyc_data != self.kyc_data {
return Err(WriteOnceError);
}
if !self.metadata.starts_with(&prior.metadata) {
return Err(WriteOnceError);
}
Ok(())
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum ActionType {
Charge,
}
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct PaymentActionObject {
/// Amount of the transfer. Base units are the same as for on-chain transactions for this
/// currency. For example, if DiemUSD is represented on-chain where “1” equals 1e-6 dollars,
/// then “1” equals the same amount here. For any currency, the on-chain mapping must be used
/// for amounts.
pub amount: u64,
/// One of the supported on-chain currency types - ex. XUS, etc.
// TODO Should be an enum per https://dip.diem.com/dip-1/#paymentactionobject
pub currency: String,
/// Populated in the request. This value indicates the requested action to perform, and the
/// only valid value is charge.
pub action: ActionType,
/// [Unix time](https://en.wikipedia.org/wiki/Unix_time) indicating the time that the payment
/// Command was created.
pub timestamp: u64,
}
/// Some fields are immutable after they are defined once. Others can be updated multiple times
/// (see below). Updating immutable fields with a different value results in a Command error.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct PaymentObject {
/// Information about the sender in this payment
pub sender: PaymentActorObject,
/// Information about the receiver in this payment
pub receiver: PaymentActorObject,
/// Unique reference ID of this payment on the payment initiator VASP (the VASP which
/// originally created this payment Object). This value should be globally unique. This field
/// is mandatory on payment creation and immutable after that. We recommend using a 128 bits
/// long UUID according to RFC4122 with "-"'s included.
pub reference_id: Uuid,
/// Used to refer an old payment known to the other VASP. For example, used for refunds. The
/// reference ID of the original payment will be placed into this field. This field is
/// mandatory on refund and immutable
pub originial_payment_reference_id: Option<Uuid>,
/// Signature of the recipient of this transaction encoded in hex. The is signed with the
/// compliance key of the recipient VASP and is used for on-chain attestation from the
/// recipient party. This may be omitted on blockchains which do not require on-chain
/// attestation.
pub recipient_signature: Option<String>,
/// Number of cryptocurrency + currency type (XUS, etc.)1 + type of action to take. This field is mandatory and immutable
pub action: PaymentActionObject,
/// Description of the payment. To be displayed to the user. Unicode utf-8 encoded max length
/// of 255 characters. This field is optional but can only be written once.
pub description: Option<String>,
}
impl PaymentObject {
pub fn sender(&self) -> &PaymentActorObject {
&self.sender
}
pub fn receiver(&self) -> &PaymentActorObject {
&self.receiver
}
pub fn reference_id(&self) -> Uuid {
self.reference_id
}
pub fn actor_object_by_actor(&self, actor: Actor) -> &PaymentActorObject {
match actor {
Actor::Sender => self.sender(),
Actor::Receiver => self.receiver(),
}
}
pub fn recipient_signature(&self) -> Option<&str> {
self.recipient_signature.as_deref()
}
pub fn validate_write_once_fields(&self, prior: &Self) -> Result<(), WriteOnceError> {
self.sender.validate_write_once_fields(&prior.sender)?;
self.receiver.validate_write_once_fields(&prior.receiver)?;
if self.reference_id != prior.reference_id {
return Err(WriteOnceError);
}
if self.originial_payment_reference_id != prior.originial_payment_reference_id {
return Err(WriteOnceError);
}
if self.action != prior.action {
return Err(WriteOnceError);
}
if prior.description.is_some() && prior.description != self.description {
return Err(WriteOnceError);
}
Ok(())
}
}
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct StatusObject {
/// Status of the payment from the perspective of this actor. This field can only be set by the
/// respective sender/receiver VASP and represents the status on the sender/receiver VASP side.
/// This field is mandatory by this respective actor (either sender or receiver side) and
/// mutable.
pub status: Status,
/// In the case of an `abort` status, this field may be used to describe the reason for the
/// abort. Represents the error code of the corresponding error.
pub abort_code: Option<AbortCode>,
/// Additional details about this error. To be used only when `abort_code` is populated.
pub abort_message: Option<String>,
}
impl StatusObject {
pub fn status(&self) -> Status {
self.status
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum Status {
/// No status is yet set from this actor.
None,
/// KYC data about the subaddresses is required by this actor.
NeedsKycData,
/// Transaction is ready for settlement according to this actor (i.e. the requried
/// signatures/KYC data has been provided.
ReadyForSettlement,
/// Indicates the actor wishes to abort this payment, instaed of settling it.
Abort,
/// Actor's KYC data resulted in a soft-match, request additional KYC data.
SoftMatch,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum AbortCode {
/// The payment is rejected. It should not be used in the `original_payment_reference_id` field
/// of a new payment
Rejected,
}
/// Represents a national ID.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct NationalIdObject {
/// Indicates the national ID value - for example, a social security number
pub id_value: String,
/// Two-letter country code (https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2)
pub country: Option<String>,
/// Indicates the type of the ID
#[serde(rename = "type")]
pub id_type: Option<String>,
}
/// Represents a physical address
#[derive(Clone, Debug, Default, PartialEq, Deserialize, Serialize)]
pub struct AddressObject {
/// The city, district, suburb, town, or village
pub city: Option<String>,
/// Two-letter country code (https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2)
pub country: Option<String>,
/// Address line 1
pub line1: Option<String>,
/// Address line 2 - apartment, unit, etc.
pub line2: Option<String>,
/// ZIP or postal code
pub postal_code: Option<String>,
/// State, county, province, region.
pub state: Option<String>,
}
/// A `KycDataObject` represents the required information for a single subaddress. Proof of
/// non-repudiation is provided by the signatures included in the JWS payloads. The only mandatory
/// fields are `payload_version` and `type`. All other fields are optional from the point of view of
/// the protocol -- however they may need to be included for another VASP to be ready to settle the
/// payment.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct KycDataObject {
/// Version identifier to allow modifications to KYC data Object without needing to bump
/// version of entire API set. Set to 1
payload_version: KycDataObjectVersion,
pub kyc_data_type: KycDataObjectType,
/// Legal given name of the user for which this KYC data Object applies.
pub given_name: Option<String>,
/// Legal surname of the user for which this KYC data Object applies.
pub surname: Option<String>,
/// Physical address data for this account
pub address: Option<AddressObject>,
/// Date of birth for the holder of this account. Specified as an ISO 8601 calendar date
/// format: https://en.wikipedia.org/wiki/ISO_8601
pub dob: Option<String>,
/// Place of birth for this user. line1 and line2 fields should not be populated for this usage
/// of the address Object
pub place_of_birth: Option<String>,
/// National ID information for the holder of this account
pub national_id: Option<NationalIdObject>,
/// Name of the legal entity. Used when subaddress represents a legal entity rather than an
/// individual. KYCDataObject should only include one of legal_entity_name OR
/// given_name/surname
pub legal_entity_name: Option<String>,
}
impl KycDataObject {
pub fn new_entity() -> Self {
Self {
payload_version: KycDataObjectVersion::V1,
kyc_data_type: KycDataObjectType::Entity,
given_name: None,
surname: None,
address: None,
dob: None,
place_of_birth: None,
national_id: None,
legal_entity_name: None,
}
}
pub fn new_individual() -> Self {
Self {
payload_version: KycDataObjectVersion::V1,
kyc_data_type: KycDataObjectType::Individual,
given_name: None,
surname: None,
address: None,
dob: None,
place_of_birth: None,
national_id: None,
legal_entity_name: None,
}
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum KycDataObjectType {
Individual,
Entity,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize_repr, Serialize_repr)]
#[repr(u8)]
pub enum KycDataObjectVersion {
V1 = 1,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct WriteOnceError;
#[cfg(test)]
mod tests {
use super::{KycDataObjectType, KycDataObjectVersion};
use serde_json::json;
#[test]
fn kyc_data_object_type() {
use KycDataObjectType::*;
let variants = [(Individual, "individual"), (Entity, "entity")];
for (variant, s) in &variants {
let json = json! { s };
assert_eq!(serde_json::to_value(variant).unwrap(), json);
assert_eq!(
serde_json::from_value::<KycDataObjectType>(json).unwrap(),
*variant
);
}
let invalid = json! { "Organization" };
serde_json::from_value::<KycDataObjectType>(invalid).unwrap_err();
}
#[test]
fn kyc_data_object_version() {
let v1_json = json! { 1 };
let v1: KycDataObjectVersion = serde_json::from_value(v1_json.clone()).unwrap();
assert_eq!(serde_json::to_value(&v1).unwrap(), v1_json);
let invalid_version = json! { 52 };
serde_json::from_value::<KycDataObjectVersion>(invalid_version).unwrap_err();
let invalid_type = json! { "1" };
serde_json::from_value::<KycDataObjectVersion>(invalid_type).unwrap_err();
}
}
| {
Err(D::Error::custom(format_args!("expected {:?}", variant)))
} | conditional_block |
types.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use serde::{
de::{Deserializer, Error},
Deserialize, Serialize,
};
use serde_repr::{Deserialize_repr, Serialize_repr};
use uuid::Uuid;
use crate::payment_command::Actor;
/// A header set with a unique UUID (according to RFC4122 with "-"'s included) for the request,
/// used for tracking requests and debugging. Responses must have the same string in the
/// X-REQUEST-ID header value as the requests they correspond to.
pub const REQUEST_ID_HEADER: &str = "X-REQUEST-ID";
/// A header with the HTTP request sender's VASP DIP-5 address used in the command object. The HTTP
/// request sender must use the compliance key of the VASP account linked with this address to sign
/// the request JWS body, and the request receiver uses this address to find the request sender's
/// compliance key to verify the JWS signature. For example: VASP A transfers funds to VASP B. The
/// HTTP request A sends to B contains X-REQUEST-SENDER-ADDRESS as VASP A's address. An HTTP
/// request B sends to A should contain VASP B's address as X-REQUEST-SENDER-ADDRESS.
pub const REQUEST_SENDER_ADDRESS: &str = "X-REQUEST-SENDER-ADDRESS";
#[derive(Debug, Copy, Clone, PartialEq, Eq, Deserialize, Serialize)]
enum ObjectType {
CommandRequestObject,
CommandResponseObject,
PaymentCommand,
}
impl ObjectType {
fn deserialize_request<'de, D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> {
Self::deserialize_variant(d, Self::CommandRequestObject)
}
fn deserialize_response<'de, D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> {
Self::deserialize_variant(d, Self::CommandResponseObject)
}
fn deserialize_payment<'de, D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> {
Self::deserialize_variant(d, Self::PaymentCommand)
}
fn deserialize_variant<'de, D: Deserializer<'de>>(
d: D,
variant: Self,
) -> Result<Self, D::Error> {
let object_type = Self::deserialize(d)?;
if object_type == variant {
Ok(object_type)
} else {
Err(D::Error::custom(format_args!("expected {:?}", variant)))
}
}
}
#[derive(Deserialize, Serialize)]
pub struct CommandRequestObject {
#[serde(deserialize_with = "ObjectType::deserialize_request")]
#[serde(rename = "_ObjectType")]
object_type: ObjectType,
#[serde(flatten)]
command: Command,
cid: Uuid,
}
impl CommandRequestObject {
pub fn new(command: Command, cid: Uuid) -> Self {
Self {
object_type: ObjectType::CommandRequestObject,
command,
cid,
}
}
pub fn command(&self) -> &Command {
&self.command
}
pub fn cid(&self) -> Uuid {
self.cid
}
pub fn into_parts(self) -> (Command, Uuid) {
(self.command, self.cid)
}
}
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum CommandStatus {
Success,
Failure,
}
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
pub struct CommandResponseObject {
#[serde(deserialize_with = "ObjectType::deserialize_response")]
#[serde(rename = "_ObjectType")]
object_type: ObjectType,
status: CommandStatus,
#[serde(skip_serializing_if = "Option::is_none")]
error: Option<OffChainError>,
#[serde(skip_serializing_if = "Option::is_none")]
cid: Option<Uuid>,
}
impl CommandResponseObject {
pub fn new(status: CommandStatus) -> Self {
Self {
object_type: ObjectType::CommandResponseObject,
status,
error: None,
cid: None,
}
}
}
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
pub enum OffChainErrorType {
#[serde(rename = "command_error")]
Command,
#[serde(rename = "protocol_error")]
Protocol,
}
// https://dip.diem.com/dip-1/#list-of-error-codes
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum ErrorCode {
//
// HTTP Header Validation Error Codes
//
/// One of the following potential errors:
/// * `X-REQUEST-SENDER-ADDRESS` header value is not the request sender’s address in the
/// command object. All command objects should have a field that is the request sender’s
/// address.
/// * Could not find Diem's onchain account by the `X-REQUEST-SENDER-ADDRESS` header value.
/// * Could not find the compliance key of the onchain account found by the
/// `X-REQUEST-SENDER-ADDRESS` header value.
/// * The compliance key found from the onchain account by `X-REQUEST-SENDER-ADDRESS` is not a
/// valid ED25519 public key.
/// * `X-REQUEST-ID` is not a valid UUID format.
InvalidHttpHeader,
/// Missing HTTP header `X-REQUEST-ID` or `X-REQUEST-SENDER-ADDRESS`.
MissingHttpHeader,
//
// JWS Validation Error Codes#
//
/// Invalid JWS format (compact) or protected header
InvalidJws,
/// JWS signature verification failed
InvalidJwsSignature,
//
// Request Object Validation Error Codes#
//
/// Request content is not valid Json
InvalidJson,
/// Object is not valid, type does not match
/// The Command request/response object json is not an object, or the command object type does
/// not match command_type.
InvalidObject,
/// Either:
/// * Missing required field
/// * An optional field is required to be set for a specific state, e.g. PaymentObject requires
/// sender's kyc_data (which is an optional field for PaymentActorObject) when sender init
/// the PaymentObject.
MissingField,
/// A field is unknown for an object.
UnknownField,
/// Invalid/unsupported command_type.
UnknownCommandType,
/// * Invalid / unknown enum field values.
/// * UUID field value does not match UUID format.
/// * Payment actor address is not a valid DIP-5 account identifier.
/// * Currency field value is not a valid Diem currency code for the connected network.
InvalidFieldValue,
/// The HTTP request sender is not the right actor to send the payment object. For example, if
/// the actor receiver sends a new command with payment object change that should be done by
/// actor sender.
InvalidCommandProducer,
/// could not find command by reference_id for a non-initial state command object; for example,
/// actor receiver received a payment command object that actor sender status is
/// `ready_for_settlement`, but receiver could not find any command object by the reference id.
InvalidInitialOrPriorNotFound,
/// PaymentActionObject#amount is under travel rule threshold, no kyc needed for the
/// transaction
NoKycNeeded,
/// Either:
/// * Field recipient_signature value is not hex-encoded bytes.
/// * Field recipient_signature value is an invalid signature.
InvalidRecipientSignature,
/// * The DIP-5 account identifier address in the command object is not HTTP request sender’s
/// address or receiver’s address. For payment object it is sender.address or
/// receiver.address.
/// * Could not find on-chain account by an DIP-5 account identifier address in command object
/// address.
UnknownAddress,
/// * Command object is in conflict with another different command object by cid, likely a cid
/// is reused for different command object.
/// * Failed to acquire lock for the command object by the reference_id.
Conflict,
/// Field payment.action.currency value is a valid Diem currency code, but it is not supported
/// or acceptable by the receiver VASP.
UnsupportedCurrency,
/// * Could not find data by the original_payment_reference_id if the sender set it.
/// * The status of the original payment object found by original_payment_reference_id is
/// aborted instead of ready_for_settlement.
InvalidOriginalPaymentReferenceId,
/// Overwrite a write-once/immutable field value
/// * Overwrite a field that can only be written once.
/// * Overwrite an immutable field (field can only be set in initial command object), e.g.
/// `original_payment_reference_id`).
/// * Overwrite opponent payment actor's fields.
InvalidOverwrite,
/// As we only allow one actor action at a time, and the next states for a given command object
/// state are limited to specific states. This error indicates the new payment object state is
/// not valid according to the current object state. For example: VASP A sends RSOFT to VASP B,
/// VASP B should send the next payment object with ABORT, or SSOFTSEND; VASP A should respond
/// to this error code if VASP B sends payment object state SSOFT.
InvalidTransition,
#[serde(other)]
/// Unknown Error Code
Unknown,
}
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
pub struct OffChainError {
#[serde(rename = "type")]
error_type: OffChainErrorType,
#[serde(skip_serializing_if = "Option::is_none")]
field: Option<String>,
code: ErrorCode,
#[serde(skip_serializing_if = "Option::is_none")]
message: Option<String>,
}
#[derive(Deserialize, Serialize)]
#[serde(tag = "command_type", content = "command")]
pub enum Command {
PaymentCommand(PaymentCommandObject),
FundPullPreApprovalCommand,
}
#[derive(Deserialize, Serialize)]
pub struct PaymentCommandObject {
#[serde(deserialize_with = "ObjectType::deserialize_payment")]
#[serde(rename = "_ObjectType")]
object_type: ObjectType,
payment: PaymentObject,
}
impl PaymentCommandObject {
pub fn new(payment: PaymentObject) -> Self {
Self {
object_type: ObjectType::PaymentCommand,
payment,
}
}
pub fn payment(&self) -> &PaymentObject {
| b fn into_payment(self) -> PaymentObject {
self.payment
}
}
/// A `PaymentActorObject` represents a participant in a payment - either sender or receiver. It
/// also includes the status of the actor, indicates missing information or willingness to settle
/// or abort the payment, and the Know-Your-Customer information of the customer involved in the
/// payment.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct PaymentActorObject {
/// Address of the sender/receiver account. Addresses may be single use or valid for a limited
/// time, and therefore VASPs should not rely on them remaining stable across time or different
/// VASP addresses. The addresses are encoded using bech32. The bech32 address encodes both the
/// address of the VASP as well as the specific user's subaddress. They should be no longer
/// than 80 characters. Mandatory and immutable. For Diem addresses, refer to the "account
/// identifier" section in DIP-5 for format.
pub address: Box<str>,
/// The KYC data for this account. This field is optional but immutable once it is set.
pub kyc_data: Option<KycDataObject>,
/// Status of the payment from the perspective of this actor. This field can only be set by the
/// respective sender/receiver VASP and represents the status on the sender/receiver VASP side.
/// This field is mandatory by this respective actor (either sender or receiver side) and
/// mutable. Note that in the first request (which is initiated by the sender), the receiver
/// status should be set to `None`.
pub status: StatusObject,
/// Can be specified by the respective VASP to hold metadata that the sender/receiver VASP
/// wishes to associate with this payment. It may be set to an empty list (i.e. `[]`). New
/// `metadata` elements may be appended to the `metadata` list via subsequent commands on an
/// object.
#[serde(skip_serializing_if = "Vec::is_empty", default)]
pub metadata: Vec<String>,
/// Freeform KYC data. If a soft-match occurs, this field can be used to specify additional KYC
/// data which can be used to clear the soft-match. It is suggested that this data be JSON,
/// XML, or another human-readable form.
pub additional_kyc_data: Option<String>,
}
impl PaymentActorObject {
pub fn status(&self) -> &StatusObject {
&self.status
}
pub fn kyc_data(&self) -> Option<&KycDataObject> {
self.kyc_data.as_ref()
}
pub fn additional_kyc_data(&self) -> Option<&str> {
self.additional_kyc_data.as_deref()
}
pub fn validate_write_once_fields(&self, prior: &Self) -> Result<(), WriteOnceError> {
if self.address != prior.address {
return Err(WriteOnceError);
}
if prior.kyc_data.is_some() && prior.kyc_data != self.kyc_data {
return Err(WriteOnceError);
}
if !self.metadata.starts_with(&prior.metadata) {
return Err(WriteOnceError);
}
Ok(())
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum ActionType {
Charge,
}
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct PaymentActionObject {
/// Amount of the transfer. Base units are the same as for on-chain transactions for this
/// currency. For example, if DiemUSD is represented on-chain where “1” equals 1e-6 dollars,
/// then “1” equals the same amount here. For any currency, the on-chain mapping must be used
/// for amounts.
pub amount: u64,
/// One of the supported on-chain currency types - ex. XUS, etc.
// TODO Should be an enum per https://dip.diem.com/dip-1/#paymentactionobject
pub currency: String,
/// Populated in the request. This value indicates the requested action to perform, and the
/// only valid value is charge.
pub action: ActionType,
/// [Unix time](https://en.wikipedia.org/wiki/Unix_time) indicating the time that the payment
/// Command was created.
pub timestamp: u64,
}
/// Some fields are immutable after they are defined once. Others can be updated multiple times
/// (see below). Updating immutable fields with a different value results in a Command error.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct PaymentObject {
/// Information about the sender in this payment
pub sender: PaymentActorObject,
/// Information about the receiver in this payment
pub receiver: PaymentActorObject,
/// Unique reference ID of this payment on the payment initiator VASP (the VASP which
/// originally created this payment Object). This value should be globally unique. This field
/// is mandatory on payment creation and immutable after that. We recommend using a 128 bits
/// long UUID according to RFC4122 with "-"'s included.
pub reference_id: Uuid,
/// Used to refer an old payment known to the other VASP. For example, used for refunds. The
/// reference ID of the original payment will be placed into this field. This field is
/// mandatory on refund and immutable
pub originial_payment_reference_id: Option<Uuid>,
/// Signature of the recipient of this transaction encoded in hex. The is signed with the
/// compliance key of the recipient VASP and is used for on-chain attestation from the
/// recipient party. This may be omitted on blockchains which do not require on-chain
/// attestation.
pub recipient_signature: Option<String>,
/// Number of cryptocurrency + currency type (XUS, etc.)1 + type of action to take. This field is mandatory and immutable
pub action: PaymentActionObject,
/// Description of the payment. To be displayed to the user. Unicode utf-8 encoded max length
/// of 255 characters. This field is optional but can only be written once.
pub description: Option<String>,
}
impl PaymentObject {
pub fn sender(&self) -> &PaymentActorObject {
&self.sender
}
pub fn receiver(&self) -> &PaymentActorObject {
&self.receiver
}
pub fn reference_id(&self) -> Uuid {
self.reference_id
}
pub fn actor_object_by_actor(&self, actor: Actor) -> &PaymentActorObject {
match actor {
Actor::Sender => self.sender(),
Actor::Receiver => self.receiver(),
}
}
pub fn recipient_signature(&self) -> Option<&str> {
self.recipient_signature.as_deref()
}
pub fn validate_write_once_fields(&self, prior: &Self) -> Result<(), WriteOnceError> {
self.sender.validate_write_once_fields(&prior.sender)?;
self.receiver.validate_write_once_fields(&prior.receiver)?;
if self.reference_id != prior.reference_id {
return Err(WriteOnceError);
}
if self.originial_payment_reference_id != prior.originial_payment_reference_id {
return Err(WriteOnceError);
}
if self.action != prior.action {
return Err(WriteOnceError);
}
if prior.description.is_some() && prior.description != self.description {
return Err(WriteOnceError);
}
Ok(())
}
}
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct StatusObject {
/// Status of the payment from the perspective of this actor. This field can only be set by the
/// respective sender/receiver VASP and represents the status on the sender/receiver VASP side.
/// This field is mandatory by this respective actor (either sender or receiver side) and
/// mutable.
pub status: Status,
/// In the case of an `abort` status, this field may be used to describe the reason for the
/// abort. Represents the error code of the corresponding error.
pub abort_code: Option<AbortCode>,
/// Additional details about this error. To be used only when `abort_code` is populated.
pub abort_message: Option<String>,
}
impl StatusObject {
pub fn status(&self) -> Status {
self.status
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum Status {
/// No status is yet set from this actor.
None,
/// KYC data about the subaddresses is required by this actor.
NeedsKycData,
/// Transaction is ready for settlement according to this actor (i.e. the requried
/// signatures/KYC data has been provided.
ReadyForSettlement,
/// Indicates the actor wishes to abort this payment, instaed of settling it.
Abort,
/// Actor's KYC data resulted in a soft-match, request additional KYC data.
SoftMatch,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum AbortCode {
/// The payment is rejected. It should not be used in the `original_payment_reference_id` field
/// of a new payment
Rejected,
}
/// Represents a national ID.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct NationalIdObject {
/// Indicates the national ID value - for example, a social security number
pub id_value: String,
/// Two-letter country code (https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2)
pub country: Option<String>,
/// Indicates the type of the ID
#[serde(rename = "type")]
pub id_type: Option<String>,
}
/// Represents a physical address
#[derive(Clone, Debug, Default, PartialEq, Deserialize, Serialize)]
pub struct AddressObject {
/// The city, district, suburb, town, or village
pub city: Option<String>,
/// Two-letter country code (https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2)
pub country: Option<String>,
/// Address line 1
pub line1: Option<String>,
/// Address line 2 - apartment, unit, etc.
pub line2: Option<String>,
/// ZIP or postal code
pub postal_code: Option<String>,
/// State, county, province, region.
pub state: Option<String>,
}
/// A `KycDataObject` represents the required information for a single subaddress. Proof of
/// non-repudiation is provided by the signatures included in the JWS payloads. The only mandatory
/// fields are `payload_version` and `type`. All other fields are optional from the point of view of
/// the protocol -- however they may need to be included for another VASP to be ready to settle the
/// payment.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct KycDataObject {
/// Version identifier to allow modifications to KYC data Object without needing to bump
/// version of entire API set. Set to 1
payload_version: KycDataObjectVersion,
pub kyc_data_type: KycDataObjectType,
/// Legal given name of the user for which this KYC data Object applies.
pub given_name: Option<String>,
/// Legal surname of the user for which this KYC data Object applies.
pub surname: Option<String>,
/// Physical address data for this account
pub address: Option<AddressObject>,
/// Date of birth for the holder of this account. Specified as an ISO 8601 calendar date
/// format: https://en.wikipedia.org/wiki/ISO_8601
pub dob: Option<String>,
/// Place of birth for this user. line1 and line2 fields should not be populated for this usage
/// of the address Object
pub place_of_birth: Option<String>,
/// National ID information for the holder of this account
pub national_id: Option<NationalIdObject>,
/// Name of the legal entity. Used when subaddress represents a legal entity rather than an
/// individual. KYCDataObject should only include one of legal_entity_name OR
/// given_name/surname
pub legal_entity_name: Option<String>,
}
impl KycDataObject {
pub fn new_entity() -> Self {
Self {
payload_version: KycDataObjectVersion::V1,
kyc_data_type: KycDataObjectType::Entity,
given_name: None,
surname: None,
address: None,
dob: None,
place_of_birth: None,
national_id: None,
legal_entity_name: None,
}
}
pub fn new_individual() -> Self {
Self {
payload_version: KycDataObjectVersion::V1,
kyc_data_type: KycDataObjectType::Individual,
given_name: None,
surname: None,
address: None,
dob: None,
place_of_birth: None,
national_id: None,
legal_entity_name: None,
}
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum KycDataObjectType {
Individual,
Entity,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize_repr, Serialize_repr)]
#[repr(u8)]
pub enum KycDataObjectVersion {
V1 = 1,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct WriteOnceError;
#[cfg(test)]
mod tests {
use super::{KycDataObjectType, KycDataObjectVersion};
use serde_json::json;
#[test]
fn kyc_data_object_type() {
use KycDataObjectType::*;
let variants = [(Individual, "individual"), (Entity, "entity")];
for (variant, s) in &variants {
let json = json! { s };
assert_eq!(serde_json::to_value(variant).unwrap(), json);
assert_eq!(
serde_json::from_value::<KycDataObjectType>(json).unwrap(),
*variant
);
}
let invalid = json! { "Organization" };
serde_json::from_value::<KycDataObjectType>(invalid).unwrap_err();
}
#[test]
fn kyc_data_object_version() {
let v1_json = json! { 1 };
let v1: KycDataObjectVersion = serde_json::from_value(v1_json.clone()).unwrap();
assert_eq!(serde_json::to_value(&v1).unwrap(), v1_json);
let invalid_version = json! { 52 };
serde_json::from_value::<KycDataObjectVersion>(invalid_version).unwrap_err();
let invalid_type = json! { "1" };
serde_json::from_value::<KycDataObjectVersion>(invalid_type).unwrap_err();
}
}
| &self.payment
}
pu | identifier_body |
types.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use serde::{
de::{Deserializer, Error},
Deserialize, Serialize,
};
use serde_repr::{Deserialize_repr, Serialize_repr};
use uuid::Uuid;
use crate::payment_command::Actor;
/// A header set with a unique UUID (according to RFC4122 with "-"'s included) for the request,
/// used for tracking requests and debugging. Responses must have the same string in the
/// X-REQUEST-ID header value as the requests they correspond to.
pub const REQUEST_ID_HEADER: &str = "X-REQUEST-ID";
/// A header with the HTTP request sender's VASP DIP-5 address used in the command object. The HTTP
/// request sender must use the compliance key of the VASP account linked with this address to sign
/// the request JWS body, and the request receiver uses this address to find the request sender's
/// compliance key to verify the JWS signature. For example: VASP A transfers funds to VASP B. The
/// HTTP request A sends to B contains X-REQUEST-SENDER-ADDRESS as VASP A's address. An HTTP
/// request B sends to A should contain VASP B's address as X-REQUEST-SENDER-ADDRESS.
pub const REQUEST_SENDER_ADDRESS: &str = "X-REQUEST-SENDER-ADDRESS";
#[derive(Debug, Copy, Clone, PartialEq, Eq, Deserialize, Serialize)]
enum ObjectType {
CommandRequestObject,
CommandResponseObject,
PaymentCommand,
}
impl ObjectType {
fn deserialize_request<'de, D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> {
Self::deserialize_variant(d, Self::CommandRequestObject)
}
fn deserialize_response<'de, D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> {
Self::deserialize_variant(d, Self::CommandResponseObject)
}
fn deserialize_payment<'de, D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> {
Self::deserialize_variant(d, Self::PaymentCommand)
}
fn deserialize_variant<'de, D: Deserializer<'de>>(
d: D,
variant: Self,
) -> Result<Self, D::Error> {
let object_type = Self::deserialize(d)?;
if object_type == variant {
Ok(object_type)
} else {
Err(D::Error::custom(format_args!("expected {:?}", variant)))
}
}
}
#[derive(Deserialize, Serialize)]
pub struct CommandRequestObject {
#[serde(deserialize_with = "ObjectType::deserialize_request")]
#[serde(rename = "_ObjectType")]
object_type: ObjectType,
#[serde(flatten)]
command: Command,
cid: Uuid,
}
impl CommandRequestObject {
pub fn new(command: Command, cid: Uuid) -> Self {
Self {
object_type: ObjectType::CommandRequestObject,
command,
cid,
}
}
pub fn command(&self) -> &Command {
&self.command
}
pub fn cid(&self) -> Uuid {
self.cid
}
pub fn into_parts(self) -> (Command, Uuid) {
(self.command, self.cid)
}
}
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum CommandStatus {
Success,
Failure,
}
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
pub struct CommandResponseObject {
#[serde(deserialize_with = "ObjectType::deserialize_response")]
#[serde(rename = "_ObjectType")]
object_type: ObjectType,
status: CommandStatus,
#[serde(skip_serializing_if = "Option::is_none")]
error: Option<OffChainError>,
#[serde(skip_serializing_if = "Option::is_none")]
cid: Option<Uuid>,
}
impl CommandResponseObject {
pub fn new(status: CommandStatus) -> Self {
Self {
object_type: ObjectType::CommandResponseObject,
status,
error: None,
cid: None,
}
}
}
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
pub enum OffChainErrorType {
#[serde(rename = "command_error")]
Command,
#[serde(rename = "protocol_error")]
Protocol,
}
// https://dip.diem.com/dip-1/#list-of-error-codes
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum ErrorCode {
//
// HTTP Header Validation Error Codes
//
/// One of the following potential errors:
/// * `X-REQUEST-SENDER-ADDRESS` header value is not the request sender’s address in the
/// command object. All command objects should have a field that is the request sender’s
/// address.
/// * Could not find Diem's onchain account by the `X-REQUEST-SENDER-ADDRESS` header value.
/// * Could not find the compliance key of the onchain account found by the
/// `X-REQUEST-SENDER-ADDRESS` header value.
/// * The compliance key found from the onchain account by `X-REQUEST-SENDER-ADDRESS` is not a
/// valid ED25519 public key.
/// * `X-REQUEST-ID` is not a valid UUID format.
InvalidHttpHeader,
/// Missing HTTP header `X-REQUEST-ID` or `X-REQUEST-SENDER-ADDRESS`.
MissingHttpHeader,
//
// JWS Validation Error Codes#
//
/// Invalid JWS format (compact) or protected header
InvalidJws,
/// JWS signature verification failed
InvalidJwsSignature,
//
// Request Object Validation Error Codes#
//
/// Request content is not valid Json
InvalidJson,
/// Object is not valid, type does not match
/// The Command request/response object json is not an object, or the command object type does
/// not match command_type.
InvalidObject,
/// Either:
/// * Missing required field
/// * An optional field is required to be set for a specific state, e.g. PaymentObject requires
/// sender's kyc_data (which is an optional field for PaymentActorObject) when sender init
/// the PaymentObject.
MissingField,
/// A field is unknown for an object.
UnknownField,
/// Invalid/unsupported command_type.
UnknownCommandType,
/// * Invalid / unknown enum field values.
/// * UUID field value does not match UUID format.
/// * Payment actor address is not a valid DIP-5 account identifier.
/// * Currency field value is not a valid Diem currency code for the connected network.
InvalidFieldValue,
/// The HTTP request sender is not the right actor to send the payment object. For example, if
/// the actor receiver sends a new command with payment object change that should be done by
/// actor sender.
InvalidCommandProducer,
/// could not find command by reference_id for a non-initial state command object; for example,
/// actor receiver received a payment command object that actor sender status is
/// `ready_for_settlement`, but receiver could not find any command object by the reference id.
InvalidInitialOrPriorNotFound,
/// PaymentActionObject#amount is under travel rule threshold, no kyc needed for the
/// transaction
NoKycNeeded,
/// Either:
/// * Field recipient_signature value is not hex-encoded bytes.
/// * Field recipient_signature value is an invalid signature.
InvalidRecipientSignature,
/// * The DIP-5 account identifier address in the command object is not HTTP request sender’s
/// address or receiver’s address. For payment object it is sender.address or
/// receiver.address.
/// * Could not find on-chain account by an DIP-5 account identifier address in command object
/// address.
UnknownAddress,
/// * Command object is in conflict with another different command object by cid, likely a cid
/// is reused for different command object.
/// * Failed to acquire lock for the command object by the reference_id.
Conflict,
/// Field payment.action.currency value is a valid Diem currency code, but it is not supported
/// or acceptable by the receiver VASP.
UnsupportedCurrency,
/// * Could not find data by the original_payment_reference_id if the sender set it.
/// * The status of the original payment object found by original_payment_reference_id is
/// aborted instead of ready_for_settlement.
InvalidOriginalPaymentReferenceId,
/// Overwrite a write-once/immutable field value
/// * Overwrite a field that can only be written once.
/// * Overwrite an immutable field (field can only be set in initial command object), e.g.
/// `original_payment_reference_id`).
/// * Overwrite opponent payment actor's fields.
InvalidOverwrite,
/// As we only allow one actor action at a time, and the next states for a given command object
/// state are limited to specific states. This error indicates the new payment object state is
/// not valid according to the current object state. For example: VASP A sends RSOFT to VASP B,
/// VASP B should send the next payment object with ABORT, or SSOFTSEND; VASP A should respond
/// to this error code if VASP B sends payment object state SSOFT.
InvalidTransition,
#[serde(other)]
/// Unknown Error Code
Unknown,
}
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
pub struct OffChainError {
#[serde(rename = "type")]
error_type: OffChainErrorType,
#[serde(skip_serializing_if = "Option::is_none")]
field: Option<String>,
code: ErrorCode,
#[serde(skip_serializing_if = "Option::is_none")]
message: Option<String>,
}
#[derive(Deserialize, Serialize)]
#[serde(tag = "command_type", content = "command")]
pub enum Command {
PaymentCommand(PaymentCommandObject),
FundPullPreApprovalCommand,
}
#[derive(Deserialize, Serialize)]
pub struct PaymentCommandObject {
#[serde(deserialize_with = "ObjectType::deserialize_payment")]
#[serde(rename = "_ObjectType")]
object_type: ObjectType,
payment: PaymentObject,
}
impl PaymentCommandObject {
pub fn new(payment: PaymentObject) -> Self {
Self {
object_type: ObjectType::PaymentCommand,
payment,
}
}
pub fn payment(&self) -> &PaymentObject {
&self.payment
}
pub fn into_payment(self) -> PaymentObject {
self.payment
}
}
/// A `PaymentActorObject` represents a participant in a payment - either sender or receiver. It
/// also includes the status of the actor, indicates missing information or willingness to settle
/// or abort the payment, and the Know-Your-Customer information of the customer involved in the
/// payment.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct PaymentActorObject {
/// Address of the sender/receiver account. Addresses may be single use or valid for a limited
/// time, and therefore VASPs should not rely on them remaining stable across time or different
/// VASP addresses. The addresses are encoded using bech32. The bech32 address encodes both the
/// address of the VASP as well as the specific user's subaddress. They should be no longer
/// than 80 characters. Mandatory and immutable. For Diem addresses, refer to the "account
/// identifier" section in DIP-5 for format.
pub address: Box<str>,
/// The KYC data for this account. This field is optional but immutable once it is set.
pub kyc_data: Option<KycDataObject>,
/// Status of the payment from the perspective of this actor. This field can only be set by the
/// respective sender/receiver VASP and represents the status on the sender/receiver VASP side.
/// This field is mandatory by this respective actor (either sender or receiver side) and
/// mutable. Note that in the first request (which is initiated by the sender), the receiver
/// status should be set to `None`.
pub status: StatusObject,
/// Can be specified by the respective VASP to hold metadata that the sender/receiver VASP
/// wishes to associate with this payment. It may be set to an empty list (i.e. `[]`). New
/// `metadata` elements may be appended to the `metadata` list via subsequent commands on an
/// object.
#[serde(skip_serializing_if = "Vec::is_empty", default)]
pub metadata: Vec<String>,
/// Freeform KYC data. If a soft-match occurs, this field can be used to specify additional KYC
/// data which can be used to clear the soft-match. It is suggested that this data be JSON,
/// XML, or another human-readable form.
pub additional_kyc_data: Option<String>,
}
impl PaymentActorObject {
pub fn status(&self) -> &StatusObject {
&self.status
}
pub fn kyc_data(&self) -> Option<&KycDataObject> {
self.kyc_data.as_ref()
}
pub fn additional_kyc_data(&self) -> Option<&str> {
self.additional_kyc_data.as_deref()
}
pub fn validate_write_once_fields(&self, prior: &Self) -> Result<(), WriteOnceError> {
if self.address != prior.address {
return Err(WriteOnceError);
}
if prior.kyc_data.is_some() && prior.kyc_data != self.kyc_data {
return Err(WriteOnceError);
}
if !self.metadata.starts_with(&prior.metadata) {
return Err(WriteOnceError);
}
Ok(())
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum ActionType {
Charge,
}
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct PaymentActionObject {
/// Amount of the transfer. Base units are the same as for on-chain transactions for this
/// currency. For example, if DiemUSD is represented on-chain where “1” equals 1e-6 dollars,
/// then “1” equals the same amount here. For any currency, the on-chain mapping must be used
/// for amounts.
pub amount: u64,
/// One of the supported on-chain currency types - ex. XUS, etc.
// TODO Should be an enum per https://dip.diem.com/dip-1/#paymentactionobject
pub currency: String,
/// Populated in the request. This value indicates the requested action to perform, and the
/// only valid value is charge.
pub action: ActionType,
/// [Unix time](https://en.wikipedia.org/wiki/Unix_time) indicating the time that the payment
/// Command was created.
pub timestamp: u64,
}
/// Some fields are immutable after they are defined once. Others can be updated multiple times
/// (see below). Updating immutable fields with a different value results in a Command error.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct PaymentObject {
/// Information about the sender in this payment
pub sender: PaymentActorObject,
/// Information about the receiver in this payment
pub receiver: PaymentActorObject,
/// Unique reference ID of this payment on the payment initiator VASP (the VASP which
/// originally created this payment Object). This value should be globally unique. This field
/// is mandatory on payment creation and immutable after that. We recommend using a 128 bits
/// long UUID according to RFC4122 with "-"'s included.
pub reference_id: Uuid,
/// Used to refer an old payment known to the other VASP. For example, used for refunds. The
/// reference ID of the original payment will be placed into this field. This field is
/// mandatory on refund and immutable
pub originial_payment_reference_id: Option<Uuid>,
/// Signature of the recipient of this transaction encoded in hex. The is signed with the
/// compliance key of the recipient VASP and is used for on-chain attestation from the
/// recipient party. This may be omitted on blockchains which do not require on-chain
/// attestation.
pub recipient_signature: Option<String>,
/// Number of cryptocurrency + currency type (XUS, etc.)1 + type of action to take. This field is mandatory and immutable
pub action: PaymentActionObject,
/// Description of the payment. To be displayed to the user. Unicode utf-8 encoded max length
/// of 255 characters. This field is optional but can only be written once.
pub description: Option<String>,
}
impl PaymentObject {
pub fn sender(&self) -> &PaymentActorObject {
&self.sender
}
pub fn receiver(&self) -> &PaymentActorObject {
&self.receiver
}
pub fn reference_id(&self) -> Uuid {
self.reference_id
}
pub fn actor_object_by_actor(&self, actor: Actor) -> &PaymentActorObject {
match actor {
Actor::Sender => self.sender(),
Actor::Receiver => self.receiver(),
}
}
pub fn recipient_signature(&self) -> Option<&str> {
self.recipient_signature.as_deref()
}
pub fn validate_write_once_fields(&self, prior: &Self) -> Result<(), WriteOnceError> {
self.sender.validate_write_once_fields(&prior.sender)?;
self.receiver.validate_write_once_fields(&prior.receiver)?;
if self.reference_id != prior.reference_id {
return Err(WriteOnceError);
}
if self.originial_payment_reference_id != prior.originial_payment_reference_id {
return Err(WriteOnceError);
}
if self.action != prior.action {
return Err(WriteOnceError);
}
if prior.description.is_some() && prior.description != self.description {
return Err(WriteOnceError);
}
Ok(())
}
}
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct StatusObject {
/// Status of the payment from the perspective of this actor. This field can only be set by the
/// respective sender/receiver VASP and represents the status on the sender/receiver VASP side.
/// This field is mandatory by this respective actor (either sender or receiver side) and
/// mutable.
pub status: Status,
/// In the case of an `abort` status, this field may be used to describe the reason for the
/// abort. Represents the error code of the corresponding error.
pub abort_code: Option<AbortCode>,
/// Additional details about this error. To be used only when `abort_code` is populated.
pub abort_message: Option<String>,
}
impl StatusObject {
pub fn status(&self) -> Status {
self.status
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum Status {
/// No status is yet set from this actor.
None,
/// KYC data about the subaddresses is required by this actor.
NeedsKycData,
/// Transaction is ready for settlement according to this actor (i.e. the requried
/// signatures/KYC data has been provided.
ReadyForSettlement,
/// Indicates the actor wishes to abort this payment, instaed of settling it.
Abort,
/// Actor's KYC data resulted in a soft-match, request additional KYC data.
SoftMatch,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum AbortCode {
/// The payment is rejected. It should not be used in the `original_payment_reference_id` field
/// of a new payment
Rejected,
}
/// Represents a national ID.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct NationalIdObject {
/// Indicates the national ID value - for example, a social security number
pub id_value: String, |
/// Two-letter country code (https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2)
pub country: Option<String>,
/// Indicates the type of the ID
#[serde(rename = "type")]
pub id_type: Option<String>,
}
/// Represents a physical address
#[derive(Clone, Debug, Default, PartialEq, Deserialize, Serialize)]
pub struct AddressObject {
/// The city, district, suburb, town, or village
pub city: Option<String>,
/// Two-letter country code (https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2)
pub country: Option<String>,
/// Address line 1
pub line1: Option<String>,
/// Address line 2 - apartment, unit, etc.
pub line2: Option<String>,
/// ZIP or postal code
pub postal_code: Option<String>,
/// State, county, province, region.
pub state: Option<String>,
}
/// A `KycDataObject` represents the required information for a single subaddress. Proof of
/// non-repudiation is provided by the signatures included in the JWS payloads. The only mandatory
/// fields are `payload_version` and `type`. All other fields are optional from the point of view of
/// the protocol -- however they may need to be included for another VASP to be ready to settle the
/// payment.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct KycDataObject {
/// Version identifier to allow modifications to KYC data Object without needing to bump
/// version of entire API set. Set to 1
payload_version: KycDataObjectVersion,
pub kyc_data_type: KycDataObjectType,
/// Legal given name of the user for which this KYC data Object applies.
pub given_name: Option<String>,
/// Legal surname of the user for which this KYC data Object applies.
pub surname: Option<String>,
/// Physical address data for this account
pub address: Option<AddressObject>,
/// Date of birth for the holder of this account. Specified as an ISO 8601 calendar date
/// format: https://en.wikipedia.org/wiki/ISO_8601
pub dob: Option<String>,
/// Place of birth for this user. line1 and line2 fields should not be populated for this usage
/// of the address Object
pub place_of_birth: Option<String>,
/// National ID information for the holder of this account
pub national_id: Option<NationalIdObject>,
/// Name of the legal entity. Used when subaddress represents a legal entity rather than an
/// individual. KYCDataObject should only include one of legal_entity_name OR
/// given_name/surname
pub legal_entity_name: Option<String>,
}
impl KycDataObject {
pub fn new_entity() -> Self {
Self {
payload_version: KycDataObjectVersion::V1,
kyc_data_type: KycDataObjectType::Entity,
given_name: None,
surname: None,
address: None,
dob: None,
place_of_birth: None,
national_id: None,
legal_entity_name: None,
}
}
pub fn new_individual() -> Self {
Self {
payload_version: KycDataObjectVersion::V1,
kyc_data_type: KycDataObjectType::Individual,
given_name: None,
surname: None,
address: None,
dob: None,
place_of_birth: None,
national_id: None,
legal_entity_name: None,
}
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum KycDataObjectType {
Individual,
Entity,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize_repr, Serialize_repr)]
#[repr(u8)]
pub enum KycDataObjectVersion {
V1 = 1,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct WriteOnceError;
#[cfg(test)]
mod tests {
use super::{KycDataObjectType, KycDataObjectVersion};
use serde_json::json;
#[test]
fn kyc_data_object_type() {
use KycDataObjectType::*;
let variants = [(Individual, "individual"), (Entity, "entity")];
for (variant, s) in &variants {
let json = json! { s };
assert_eq!(serde_json::to_value(variant).unwrap(), json);
assert_eq!(
serde_json::from_value::<KycDataObjectType>(json).unwrap(),
*variant
);
}
let invalid = json! { "Organization" };
serde_json::from_value::<KycDataObjectType>(invalid).unwrap_err();
}
#[test]
fn kyc_data_object_version() {
let v1_json = json! { 1 };
let v1: KycDataObjectVersion = serde_json::from_value(v1_json.clone()).unwrap();
assert_eq!(serde_json::to_value(&v1).unwrap(), v1_json);
let invalid_version = json! { 52 };
serde_json::from_value::<KycDataObjectVersion>(invalid_version).unwrap_err();
let invalid_type = json! { "1" };
serde_json::from_value::<KycDataObjectVersion>(invalid_type).unwrap_err();
}
} | random_line_split | |
PhaseOne.js | import {
Badge,
Button,
Card,
CardMedia,
Divider,
Grid,
makeStyles,
Typography,
} from "@material-ui/core";
import { Alert, AlertTitle } from "@material-ui/lab";
import React, { useEffect, useState } from "react";
import { useSelector } from "react-redux";
import startSound from "../../assets/sound/start.wav";
import coinSound from "../../assets/sound/coin.wav";
import passSound from "../../assets/sound/warm_beep.wav";
// update type constants (manual sync with server-side constant needed)
const TYPES = {
BID: "BID",
PASS: "PASS",
};
const useStyles = makeStyles((theme) => ({
root: {
flexGrow: 1,
},
boardWrapper: {
padding: theme.spacing(1),
},
notTurn: {
pointerEvents: "none",
},
propertyRow: {
width: "100%",
},
card: {
position: "relative",
borderRadius: "12px",
width: "120px",
},
cardOverlay: {
position: "absolute",
top: 0,
left: 0,
bottom: 0,
right: 0,
display: "flex",
justifyContent: "center",
alignItems: "center",
backgroundColor: "rgba(0,0,0,0.5)",
"& *": {
color: "white",
},
"&.front": {
backgroundColor: "lightblue",
"& *": {
color: "black",
},
}, | },
biddingStatus: {
border: "1px solid #aaa",
padding: theme.spacing(1),
backgroundColor: "#eee",
},
coinImage: {
width: "60px",
cursor: "pointer",
borderRadius: "100%",
"&.selected": {
border: "2px solid orange",
boxShadow: "0 0 10px orange",
},
},
coinStatusTable: {
borderTop: `1px solid #aaa`,
borderBottom: `1px solid #aaa`,
marginTop: theme.spacing(1),
"&>.MuiGrid-item:first-child, &>.MuiGrid-item:nth-child(2)": {
borderRight: `1px solid #aaa`,
},
"& p": {
fontWeight: "bold",
},
},
btnGroup: {
marginTop: theme.spacing(2),
textAlign: "center",
"&>button": {
fontWeight: "bold",
},
"&>button:first-child": {
marginRight: theme.spacing(4),
},
},
gameStateHeader: {
fontWeight: "bold",
fontSize: "0.8rem",
},
[theme.breakpoints.down("xs")]: {
cardImage: {
width: "70px",
},
coinStatusTable: {
"&>.MuiGrid-item:first-child, &>.MuiGrid-item:nth-child(2)": {
borderRight: "none",
borderBottom: `1px solid #aaa`,
},
},
},
}));
const PhaseOne = ({ socket, gameState, room }) => {
const classes = useStyles();
const auth = useSelector((state) => state.auth);
const [activePlayer, setActivePlayer] = useState(null);
const [myState, setMyState] = useState(null);
const [selectedCoins, setSelectedCoins] = useState([]);
const [selectedValues, setSelectedValues] = useState(0);
const [startAudio] = useState(new Audio(startSound));
const [coinAudio] = useState(new Audio(coinSound));
const [passAudio] = useState(new Audio(passSound));
useEffect(() => {
if (
gameState &&
activePlayer &&
gameState.players.find((player) => player.userId === activePlayer.userId)
.bidding != null
) {
coinAudio.currentTime = 0;
coinAudio.play();
}
if (
gameState &&
activePlayer &&
gameState.players.find((player) => player.userId === activePlayer.userId)
.bidding == null
) {
passAudio.currentTime = 0;
passAudio.play();
}
const active = gameState?.players.find((player) => player.isTurn);
const me = gameState?.players.find(
(player) => player.userId === auth.userInfo._id
);
setActivePlayer(active);
setMyState(me);
}, [gameState]);
useEffect(() => {
startAudio.play();
}, []);
const onCoinClick = (index, value) => {
if (selectedCoins.includes(index)) {
setSelectedCoins(selectedCoins.filter((idx) => idx !== index));
setSelectedValues(selectedValues - value);
} else {
setSelectedCoins([...selectedCoins, index]);
setSelectedValues(selectedValues + value);
}
};
const onBidClick = () => {
socket.emit("updateForSale", {
type: TYPES.BID,
payload: {
selectedCoinsIndex: selectedCoins,
},
room,
userId: myState.userId,
});
// unset selected coins from client-side view
setSelectedCoins([]);
setSelectedValues(0);
};
const onPassClick = () => {
socket.emit("updateForSale", {
type: TYPES.PASS,
room,
userId: myState.userId,
});
// unset selected coins from client-side view
setSelectedCoins([]);
setSelectedValues(0);
};
// Utility to display coin values with commas in every three digits
const numberWithCommas = (num) => {
return num.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ",");
};
// Utility to render client user's remaining coin values
const remainingCoins = () => {
return numberWithCommas(
myState.coins.reduce((acc, coin) => acc + coin.value, 0)
);
};
// Utility to find minimum bid available for current round
const minimumBid = () => {
const bids = gameState.players.map((player) => player.bidding);
return Math.max(...bids) + 1000;
};
return (
<>
{gameState && myState && (
<div
className={`${classes.root} ${myState.isTurn ? "" : classes.notTurn}`}
>
<Alert severity="info" variant="standard">
{activePlayer ? (
<AlertTitle>
Player{" "}
<span className={classes.activePlayerName}>
{activePlayer.username}
</span>{" "}
is making a decision...
</AlertTitle>
) : (
<AlertTitle> New round is about to begin...</AlertTitle>
)}
</Alert>
<div className={classes.boardWrapper}>
<Grid
container
className={classes.propertyRow}
justify="center"
alignItems="flex-start"
spacing={2}
>
<Grid item>
<Card className={classes.card}>
<CardMedia
src="https://i.pinimg.com/236x/b9/70/33/b97033a8708d2cbaf7d1990020a89a54--playing-cards-deck.jpg"
component="img"
className={classes.cardImage}
/>
<div className={classes.cardOverlay}>
<Typography variant="h5">
{gameState.remainingProperties}
</Typography>
</div>
</Card>
</Grid>
<Grid container item xs spacing={1} justify="flex-start">
{gameState.openProperties.map((propertyCard) => {
const renderCard = () => (
<Card className={classes.card}>
<CardMedia
src={propertyCard.image_url}
component="img"
className={classes.cardImage}
/>
<div className={`${classes.cardOverlay} front`}>
<Typography variant="h5">
{propertyCard.value}
</Typography>
</div>
</Card>
);
const taken = propertyCard.taken;
return (
<Grid item key={propertyCard.value}>
{taken ? (
<Badge
badgeContent={taken}
color="primary"
anchorOrigin={{
vertical: "bottom",
horizontal: "left",
}}
>
{renderCard()}
</Badge>
) : (
renderCard()
)}
</Grid>
);
})}
</Grid>
</Grid>
<Divider style={{ marginTop: "12px", marginBottom: "12px" }} />
<Typography variant="overline" className={classes.gameStateHeader}>
Bidding Status
</Typography>
<Grid container className={classes.biddingRow} spacing={1}>
{gameState.players.map((player) => {
return (
<Grid item xs={12} sm={6} key={player.userId}>
<Typography
variant="h6"
className={`${
activePlayer && player.userId === activePlayer.userId
? classes.activePlayerName
: ""
} ${classes.biddingStatus}`}
>
{player.username}:{" "}
{player.bidding || player.bidding === 0
? `$ ${numberWithCommas(player.bidding)}`
: "PASS"}
</Typography>
</Grid>
);
})}
</Grid>
<Divider style={{ marginTop: "12px", marginBottom: "12px" }} />
<Grid container spacing={1} justify="flex-start">
{myState.coins.map((coin, index) => {
return (
<Grid item key={index}>
<img
title={coin.value}
src={coin.image_url}
alt="coin"
className={`${classes.coinImage} ${
selectedCoins.includes(index) && "selected"
}`}
onClick={() => onCoinClick(index, coin.value)}
/>
</Grid>
);
})}
</Grid>
<Grid
container
spacing={2}
justify="center"
className={classes.coinStatusTable}
>
<Grid item xs={12} sm={4}>
<Typography>Remaining: $ {remainingCoins()}</Typography>
</Grid>
<Grid item xs={12} sm={4}>
<Typography>
Current: ${" "}
{numberWithCommas(selectedValues + myState.bidding)}
</Typography>
</Grid>
<Grid item xs={12} sm={4}>
<Typography>
Minimum: $ {numberWithCommas(minimumBid())}
</Typography>
</Grid>
</Grid>
<div className={classes.btnGroup}>
<Button
variant="contained"
color="primary"
onClick={onBidClick}
disabled={
selectedValues + myState.bidding < minimumBid() ||
!myState.isTurn
}
>
Bid
</Button>
<Button
variant="contained"
color="secondary"
onClick={onPassClick}
disabled={!myState.isTurn}
>
Pass
</Button>
</div>
{myState && myState.properties.length > 0 && (
<>
<Divider style={{ marginTop: "12px", marginBottom: "12px" }} />
<Typography
variant="overline"
className={classes.gameStateHeader}
>
My Properties
</Typography>
<Grid container item xs spacing={1} justify="flex-start">
{myState.properties.map((propertyCard) => (
<Grid item key={propertyCard.value}>
<Card className={`${classes.card}`}>
<CardMedia
src={propertyCard.image_url}
component="img"
className={classes.cardImage}
/>
<div className={`${classes.cardOverlay} front`}>
<Typography variant="h5">
{propertyCard.value}
</Typography>
</div>
</Card>
</Grid>
))}
</Grid>
</>
)}
</div>
</div>
)}
</>
);
};
export default PhaseOne; | },
activePlayerName: {
color: theme.palette.error.dark,
fontWeight: "bold", | random_line_split |
PhaseOne.js | import {
Badge,
Button,
Card,
CardMedia,
Divider,
Grid,
makeStyles,
Typography,
} from "@material-ui/core";
import { Alert, AlertTitle } from "@material-ui/lab";
import React, { useEffect, useState } from "react";
import { useSelector } from "react-redux";
import startSound from "../../assets/sound/start.wav";
import coinSound from "../../assets/sound/coin.wav";
import passSound from "../../assets/sound/warm_beep.wav";
// update type constants (manual sync with server-side constant needed)
const TYPES = {
BID: "BID",
PASS: "PASS",
};
const useStyles = makeStyles((theme) => ({
root: {
flexGrow: 1,
},
boardWrapper: {
padding: theme.spacing(1),
},
notTurn: {
pointerEvents: "none",
},
propertyRow: {
width: "100%",
},
card: {
position: "relative",
borderRadius: "12px",
width: "120px",
},
cardOverlay: {
position: "absolute",
top: 0,
left: 0,
bottom: 0,
right: 0,
display: "flex",
justifyContent: "center",
alignItems: "center",
backgroundColor: "rgba(0,0,0,0.5)",
"& *": {
color: "white",
},
"&.front": {
backgroundColor: "lightblue",
"& *": {
color: "black",
},
},
},
activePlayerName: {
color: theme.palette.error.dark,
fontWeight: "bold",
},
biddingStatus: {
border: "1px solid #aaa",
padding: theme.spacing(1),
backgroundColor: "#eee",
},
coinImage: {
width: "60px",
cursor: "pointer",
borderRadius: "100%",
"&.selected": {
border: "2px solid orange",
boxShadow: "0 0 10px orange",
},
},
coinStatusTable: {
borderTop: `1px solid #aaa`,
borderBottom: `1px solid #aaa`,
marginTop: theme.spacing(1),
"&>.MuiGrid-item:first-child, &>.MuiGrid-item:nth-child(2)": {
borderRight: `1px solid #aaa`,
},
"& p": {
fontWeight: "bold",
},
},
btnGroup: {
marginTop: theme.spacing(2),
textAlign: "center",
"&>button": {
fontWeight: "bold",
},
"&>button:first-child": {
marginRight: theme.spacing(4),
},
},
gameStateHeader: {
fontWeight: "bold",
fontSize: "0.8rem",
},
[theme.breakpoints.down("xs")]: {
cardImage: {
width: "70px",
},
coinStatusTable: {
"&>.MuiGrid-item:first-child, &>.MuiGrid-item:nth-child(2)": {
borderRight: "none",
borderBottom: `1px solid #aaa`,
},
},
},
}));
const PhaseOne = ({ socket, gameState, room }) => {
const classes = useStyles();
const auth = useSelector((state) => state.auth);
const [activePlayer, setActivePlayer] = useState(null);
const [myState, setMyState] = useState(null);
const [selectedCoins, setSelectedCoins] = useState([]);
const [selectedValues, setSelectedValues] = useState(0);
const [startAudio] = useState(new Audio(startSound));
const [coinAudio] = useState(new Audio(coinSound));
const [passAudio] = useState(new Audio(passSound));
useEffect(() => {
if (
gameState &&
activePlayer &&
gameState.players.find((player) => player.userId === activePlayer.userId)
.bidding != null
) |
if (
gameState &&
activePlayer &&
gameState.players.find((player) => player.userId === activePlayer.userId)
.bidding == null
) {
passAudio.currentTime = 0;
passAudio.play();
}
const active = gameState?.players.find((player) => player.isTurn);
const me = gameState?.players.find(
(player) => player.userId === auth.userInfo._id
);
setActivePlayer(active);
setMyState(me);
}, [gameState]);
useEffect(() => {
startAudio.play();
}, []);
const onCoinClick = (index, value) => {
if (selectedCoins.includes(index)) {
setSelectedCoins(selectedCoins.filter((idx) => idx !== index));
setSelectedValues(selectedValues - value);
} else {
setSelectedCoins([...selectedCoins, index]);
setSelectedValues(selectedValues + value);
}
};
const onBidClick = () => {
socket.emit("updateForSale", {
type: TYPES.BID,
payload: {
selectedCoinsIndex: selectedCoins,
},
room,
userId: myState.userId,
});
// unset selected coins from client-side view
setSelectedCoins([]);
setSelectedValues(0);
};
const onPassClick = () => {
socket.emit("updateForSale", {
type: TYPES.PASS,
room,
userId: myState.userId,
});
// unset selected coins from client-side view
setSelectedCoins([]);
setSelectedValues(0);
};
// Utility to display coin values with commas in every three digits
const numberWithCommas = (num) => {
return num.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ",");
};
// Utility to render client user's remaining coin values
const remainingCoins = () => {
return numberWithCommas(
myState.coins.reduce((acc, coin) => acc + coin.value, 0)
);
};
// Utility to find minimum bid available for current round
const minimumBid = () => {
const bids = gameState.players.map((player) => player.bidding);
return Math.max(...bids) + 1000;
};
return (
<>
{gameState && myState && (
<div
className={`${classes.root} ${myState.isTurn ? "" : classes.notTurn}`}
>
<Alert severity="info" variant="standard">
{activePlayer ? (
<AlertTitle>
Player{" "}
<span className={classes.activePlayerName}>
{activePlayer.username}
</span>{" "}
is making a decision...
</AlertTitle>
) : (
<AlertTitle> New round is about to begin...</AlertTitle>
)}
</Alert>
<div className={classes.boardWrapper}>
<Grid
container
className={classes.propertyRow}
justify="center"
alignItems="flex-start"
spacing={2}
>
<Grid item>
<Card className={classes.card}>
<CardMedia
src="https://i.pinimg.com/236x/b9/70/33/b97033a8708d2cbaf7d1990020a89a54--playing-cards-deck.jpg"
component="img"
className={classes.cardImage}
/>
<div className={classes.cardOverlay}>
<Typography variant="h5">
{gameState.remainingProperties}
</Typography>
</div>
</Card>
</Grid>
<Grid container item xs spacing={1} justify="flex-start">
{gameState.openProperties.map((propertyCard) => {
const renderCard = () => (
<Card className={classes.card}>
<CardMedia
src={propertyCard.image_url}
component="img"
className={classes.cardImage}
/>
<div className={`${classes.cardOverlay} front`}>
<Typography variant="h5">
{propertyCard.value}
</Typography>
</div>
</Card>
);
const taken = propertyCard.taken;
return (
<Grid item key={propertyCard.value}>
{taken ? (
<Badge
badgeContent={taken}
color="primary"
anchorOrigin={{
vertical: "bottom",
horizontal: "left",
}}
>
{renderCard()}
</Badge>
) : (
renderCard()
)}
</Grid>
);
})}
</Grid>
</Grid>
<Divider style={{ marginTop: "12px", marginBottom: "12px" }} />
<Typography variant="overline" className={classes.gameStateHeader}>
Bidding Status
</Typography>
<Grid container className={classes.biddingRow} spacing={1}>
{gameState.players.map((player) => {
return (
<Grid item xs={12} sm={6} key={player.userId}>
<Typography
variant="h6"
className={`${
activePlayer && player.userId === activePlayer.userId
? classes.activePlayerName
: ""
} ${classes.biddingStatus}`}
>
{player.username}:{" "}
{player.bidding || player.bidding === 0
? `$ ${numberWithCommas(player.bidding)}`
: "PASS"}
</Typography>
</Grid>
);
})}
</Grid>
<Divider style={{ marginTop: "12px", marginBottom: "12px" }} />
<Grid container spacing={1} justify="flex-start">
{myState.coins.map((coin, index) => {
return (
<Grid item key={index}>
<img
title={coin.value}
src={coin.image_url}
alt="coin"
className={`${classes.coinImage} ${
selectedCoins.includes(index) && "selected"
}`}
onClick={() => onCoinClick(index, coin.value)}
/>
</Grid>
);
})}
</Grid>
<Grid
container
spacing={2}
justify="center"
className={classes.coinStatusTable}
>
<Grid item xs={12} sm={4}>
<Typography>Remaining: $ {remainingCoins()}</Typography>
</Grid>
<Grid item xs={12} sm={4}>
<Typography>
Current: ${" "}
{numberWithCommas(selectedValues + myState.bidding)}
</Typography>
</Grid>
<Grid item xs={12} sm={4}>
<Typography>
Minimum: $ {numberWithCommas(minimumBid())}
</Typography>
</Grid>
</Grid>
<div className={classes.btnGroup}>
<Button
variant="contained"
color="primary"
onClick={onBidClick}
disabled={
selectedValues + myState.bidding < minimumBid() ||
!myState.isTurn
}
>
Bid
</Button>
<Button
variant="contained"
color="secondary"
onClick={onPassClick}
disabled={!myState.isTurn}
>
Pass
</Button>
</div>
{myState && myState.properties.length > 0 && (
<>
<Divider style={{ marginTop: "12px", marginBottom: "12px" }} />
<Typography
variant="overline"
className={classes.gameStateHeader}
>
My Properties
</Typography>
<Grid container item xs spacing={1} justify="flex-start">
{myState.properties.map((propertyCard) => (
<Grid item key={propertyCard.value}>
<Card className={`${classes.card}`}>
<CardMedia
src={propertyCard.image_url}
component="img"
className={classes.cardImage}
/>
<div className={`${classes.cardOverlay} front`}>
<Typography variant="h5">
{propertyCard.value}
</Typography>
</div>
</Card>
</Grid>
))}
</Grid>
</>
)}
</div>
</div>
)}
</>
);
};
export default PhaseOne;
| {
coinAudio.currentTime = 0;
coinAudio.play();
} | conditional_block |
main.go | package main
import (
"context"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"os"
"regexp"
"strings"
"time"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
type Server struct {
db *mongo.Client
col *mongo.Collection
}
type Participants struct {
Name string
}
type Reaction struct {
Reaction string
Actor string
}
type Photo struct {
Uri string
Creation int
}
type Share struct {
Link string
}
type Messages struct {
Messages []Message `json:"messages"`
}
type Message struct {
Sender string
Timestamp int
Content string
Photos []Photo
Reactions []Reaction
Share Share
Type string
}
type ReturnMessage struct {
Sender string
Timestamp int
Content string
Photo Photo
Reactions []Reaction
Share Share
Type string
}
type ServerResponse struct {
MessageResults Messages
Error ErrorCode
LastID string
}
type ErrorCode string
const (
KeyPassPhrase string = "fjklj4kj12414980a9fasdvklavn!@$1"
MalformedPagedBySenderURL ErrorCode = "URL should look like '...?sender=example%20name&startAt=a8890ef6b...'"
SenderEmpty ErrorCode = "URL 'sender' parameter is empty"
)
// ObjectIdRegEx Only grabs alphanumeric ID and quotes between ObjectID()
var ObjectIdRegEx = regexp.MustCompile(`"(.*?)"`)
func reformatObjectId(objectId string) string {
fmt.Println("objectID passed in: ", objectId)
var idStringBeginning = "ObjectId("
var idStringEnd = ")"
id := ObjectIdRegEx.FindString(objectId)
if id == "" {
fmt.Println("Error in reformatObjectId")
return ""
}
return idStringBeginning + id + idStringEnd
}
func randomMessage(s *Server) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Println("Begin randomMessage")
// Construct aggregation "pipeline" to return 1 random document from entire collection
pipeline := []bson.D{bson.D{{"$sample", bson.D{{"size", 1}}}}}
cursor, _ := s.col.Aggregate(context.Background(), pipeline)
var result Message
for cursor.Next(context.Background()) {
cursorErr := cursor.Decode(&result)
if cursorErr != nil {
log.Fatal("Error in random() cursor")
}
fmt.Println("Result: ", result)
}
if checkForVideo(result) {
randomMessage(s)
}
retMessage := craftReturnMessage(result)
jsonResult, _ := json.Marshal(retMessage)
w.Write(jsonResult)
fmt.Println("End randomMessage")
})
}
func createEmptyServerResponseWithError(err ErrorCode) ServerResponse {
return ServerResponse{
Error: err,
MessageResults: Messages{},
LastID: ""}
}
// First string = sender
// Second string = startingId (if any)
// If ServerResponse != nil -> Return it, because we have an error
func getPagedQueryTerms(r *http.Request) (string, string, ServerResponse) {
query := r.URL.Query()
if len(query) == 0 {
responseObject := createEmptyServerResponseWithError(MalformedPagedBySenderURL)
return "", "", responseObject
}
senderQ := query["sender"]
if len(senderQ) == 0 {
responseObject := createEmptyServerResponseWithError(SenderEmpty)
return "", "", responseObject
}
sender := senderQ[0]
if sender == "" {
responseObject := createEmptyServerResponseWithError(SenderEmpty)
return "", "", responseObject
}
startingIdQ := query["startAt"]
var startingId string
if len(startingIdQ) == 0 {
startingId = ""
} else {
startingId = startingIdQ[0]
}
return sender, startingId, ServerResponse{}
}
func encryptLastId(lastId string) string {
fmt.Println("Beginning encryptLastId()")
// Generate AES cipher with 32 byte passphrase
aesCipher, err := aes.NewCipher([]byte(KeyPassPhrase))
if err != nil {
fmt.Println("Error in encryptLastId(): ", err)
}
// GCM "Galois/Counter Mode": Symmetric Keyy cryptographic block cipher
gcm, err := cipher.NewGCM(aesCipher)
if err != nil {
fmt.Println("Error in encryptLastId(): ", err)
}
// Nonce is literally a "one off" byte array which will be populated by a random sequence below.
// The nonce is prepended/appended to the cipher (?) and is used in deciphering
nonce := make([]byte, gcm.NonceSize())
if _, err = io.ReadFull(rand.Reader, nonce); err != nil {
fmt.Println("Error in io.ReadFull: ", err)
}
encryptedByteArray := gcm.Seal(nonce, nonce, []byte(lastId), nil)
// Convert to Base64 to ensure we can transmit via HTTP without error or corruption
encryptedString := base64.StdEncoding.EncodeToString(encryptedByteArray)
fmt.Println("Ending encryptLastId()")
return encryptedString
}
func decryptLastId(encLastId string) string {
fmt.Println("Beginning decryptLastId()")
encLastIdByteArray, err := base64.StdEncoding.DecodeString(encLastId)
if err != nil {
fmt.Println("Error in StdEncoding.DecodeString: ", err)
}
aesCipher, err := aes.NewCipher([]byte(KeyPassPhrase))
if err != nil {
fmt.Println("Error in decryptLastId(): ", err)
}
gcm, err := cipher.NewGCM(aesCipher)
if err != nil |
nonceSize := gcm.NonceSize()
nonce, cipherText := encLastIdByteArray[:nonceSize], encLastIdByteArray[nonceSize:]
decryptedLastId, err := gcm.Open(nil, []byte(nonce), []byte(cipherText), nil)
if err != nil {
fmt.Println("Error in gcm.Open: ", err)
}
fmt.Println("Ending decryptLastId()")
return string(decryptedLastId)
}
func pagedMessagesLogic(s *Server, r *http.Request) ServerResponse {
fmt.Println("Begin pagedMessagesBySender()")
maxItems := 10
sender, startingId, err := getPagedQueryTerms(r)
if err.Error != "" {
return err
}
fmt.Println("StartingID: ", startingId)
fmt.Println("Sender: ", sender)
// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}}
// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159}
// pipeline := []bson.D{bson.D{{"$match", {bson.D{{"sender", sender}}}}}, bson.D{{"$limit", maxItems}}}
// pipeline := []bson.M{bson.M{"$match": bson.M{"sender": sender, "_id": bson.M{"$gt": startingId}}}, bson.M{"$limit": maxItems}}
pipeline := pagedPipelineBuilder(sender, startingId, maxItems)
cursor, _ := s.col.Aggregate(context.Background(), pipeline)
var messageBatch Messages
var result Message
var rawId bson.RawValue
for cursor.Next(context.Background()) {
cursorErr := cursor.Decode(&result)
if cursorErr != nil {
log.Fatal("Error in pagedMessagesBySender() cursor: ", cursorErr)
}
messageBatch.Messages = append(messageBatch.Messages, result)
rawId = cursor.Current.Lookup("_id")
}
lastId := stringFromRawValue(rawId)
encryptedLastId := encryptLastId(lastId)
serverResponse := ServerResponse{
MessageResults: messageBatch,
Error: "",
LastID: encryptedLastId}
return serverResponse
}
func stringFromRawValue(rawId bson.RawValue) string {
objectID := rawId.ObjectID().String()
lastId := strings.Split(objectID, "\"")
return lastId[1]
}
func pagedPipelineBuilder(sender string, startingId string, limit int) []bson.M {
//pipeline := []bson.M{bson.M{"$match": bson.M{"sender": sender, "_id": bson.M{"$gt": startingId}}}, bson.M{"$limit": maxItems}}
matchElement := matchPipelineBuilder(sender, startingId)
limitElement := bson.M{"$limit": limit}
pipeline := []bson.M{matchElement, limitElement}
return pipeline
}
func matchPipelineBuilder(sender string, startingId string) bson.M {
matchRoot := bson.M{"$match": ""}
senderElement := bson.M{"sender": sender}
idElement := bson.M{"_id": ""}
gtElement := bson.M{"$gt": ""}
if startingId == "" {
matchRoot["$match"] = senderElement
} else {
gtElement["$gt"] = startingId
idElement["_id"] = gtElement
tempArray := []bson.M{senderElement, idElement}
matchRoot["$match"] = tempArray
}
fmt.Println("REturning matchroot: ", matchRoot)
return matchRoot
}
func pagedMessagesBySender(s *Server) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
returnObject := pagedMessagesLogic(s, r)
returnJson, err := json.Marshal(returnObject)
if err != nil {
fmt.Println("Error converted pagedMessagesLogic() response to JSON: ", err)
}
w.Write(returnJson)
fmt.Println("End pagedMessagesBySender()")
})
}
func craftReturnMessage(objIn Message) ReturnMessage {
objIn.Photos = handleMediaPath(objIn.Photos)
newMessage := ReturnMessage{
Sender: objIn.Sender,
Content: objIn.Content,
Timestamp: objIn.Timestamp,
Share: objIn.Share,
Reactions: objIn.Reactions,
}
if len(objIn.Photos) > 0 {
newMessage.Photo = objIn.Photos[0]
}
return newMessage
}
func capitalizeName(name string) string {
return strings.Title(name)
}
func checkForVideo(obj Message) bool {
fmt.Println("in check for video")
if obj.Photos == nil {
return false
}
path := obj.Photos[0].Uri
ext := ".mp4"
fmt.Println("Path: ", path)
fmt.Println(strings.Contains(path, ext))
return strings.Contains(path, ext)
}
func handleMediaPath(origPhotos []Photo) []Photo {
if origPhotos == nil {
return origPhotos
}
if origPhotos[0].Uri == "" {
return origPhotos
}
path := &origPhotos[0].Uri
videos := "/videos/"
photos := "/photos/"
gifs := "/gifs/"
if strings.Contains(*path, videos) {
*path = stripVideoPath(*path)
}
if strings.Contains(*path, photos) {
*path = stripPhotoPath(*path)
}
if strings.Contains(*path, gifs) {
*path = stripGifPath(*path)
}
return origPhotos
}
func stripVideoPath(path string) string {
videoIndex := strings.Index(path, "/videos/")
return path[videoIndex:]
}
func stripPhotoPath(path string) string {
splitString := strings.SplitAfter(path, "/photos/")
return splitString[len(splitString)-1]
}
func stripGifPath(path string) string {
gifIndex := strings.Index(path, "/gifs/")
return path[gifIndex:]
}
func allMessagesBySender(s *Server) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
q := r.URL.Query()["participant"]
sender := capitalizeName(q[0])
pipeline := []bson.D{
bson.D{{"$match", bson.D{{"sender", sender}}}},
bson.D{{"$sort", bson.D{{"timestamp", -1}}}}}
cursor, _ := s.col.Aggregate(context.Background(), pipeline)
var allMessages Messages
for cursor.Next(context.Background()) {
var result Message
cursorErr := cursor.Decode(&result)
if cursorErr != nil {
log.Fatal("Error in random() cursor")
}
allMessages.Messages = append(allMessages.Messages, result)
}
jAllMessages, _ := json.Marshal(allMessages)
w.Write(jAllMessages)
})
}
func randomMessageBySender(s *Server) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
q := r.URL.Query()["participant"]
sender := capitalizeName(q[0])
pipeline := []bson.D{
bson.D{{"$match", bson.D{{"sender", sender}}}},
bson.D{{"$sample", bson.D{{"size", 1}}}}}
cursor, _ := s.col.Aggregate(context.Background(), pipeline)
var allMessages Messages
for cursor.Next(context.Background()) {
var result Message
cursorErr := cursor.Decode(&result)
if cursorErr != nil {
log.Fatal("Error in random() cursor")
}
allMessages.Messages = append(allMessages.Messages, result)
}
jAllMessages, _ := json.Marshal(allMessages)
w.Write(jAllMessages)
})
}
func getPort() string {
port := os.Getenv("PORT")
if port == "" {
port = "8080" // Google services wants 8080 or will decide for us.
log.Printf("Defaulting to port %s", port)
}
return port
}
func main() {
//mongoURI := "mongodb://localhost:27017"
mongoURI := "mongodb+srv://kak:ricosuave@kak-6wzzo.gcp.mongodb.net/test?retryWrites=true&w=majority"
//client, cancel := mongolib.ConnectToMongoDB(mongoURI)
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
client, err := mongo.Connect(ctx, options.Client().ApplyURI(mongoURI))
if err != nil {
fmt.Println("Error connecting to mongo DB: ", err)
}
defer cancel()
//collection := mongolib.GetMongoCollection(client, "nebrasketball", "messages")
collection := client.Database("nebrasketball").Collection("messages")
server := &Server{db: client, col: collection}
http.Handle("/", http.FileServer(http.Dir("./static")))
http.Handle("/random", randomMessage(server))
http.Handle("/randsender", randomMessageBySender(server))
http.Handle("/getallfromsender", allMessagesBySender(server))
http.Handle("/getpagedfromsender", pagedMessagesBySender(server))
port := getPort()
log.Printf("Listening on port %s", port)
if err := http.ListenAndServe(":"+port, nil); err != nil {
log.Fatal(err)
}
}
| {
fmt.Println("Error in encryptLastId(): ", err)
} | conditional_block |
main.go | package main
import (
"context"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"os"
"regexp"
"strings"
"time"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
type Server struct {
db *mongo.Client
col *mongo.Collection
}
type Participants struct {
Name string
}
type Reaction struct {
Reaction string
Actor string
}
type Photo struct {
Uri string
Creation int
}
type Share struct {
Link string
}
type Messages struct {
Messages []Message `json:"messages"`
}
type Message struct {
Sender string
Timestamp int
Content string
Photos []Photo
Reactions []Reaction
Share Share
Type string
}
type ReturnMessage struct {
Sender string
Timestamp int
Content string
Photo Photo
Reactions []Reaction
Share Share
Type string
}
type ServerResponse struct {
MessageResults Messages
Error ErrorCode
LastID string
}
type ErrorCode string
const (
KeyPassPhrase string = "fjklj4kj12414980a9fasdvklavn!@$1"
MalformedPagedBySenderURL ErrorCode = "URL should look like '...?sender=example%20name&startAt=a8890ef6b...'"
SenderEmpty ErrorCode = "URL 'sender' parameter is empty"
)
// ObjectIdRegEx Only grabs alphanumeric ID and quotes between ObjectID()
var ObjectIdRegEx = regexp.MustCompile(`"(.*?)"`)
func reformatObjectId(objectId string) string {
fmt.Println("objectID passed in: ", objectId)
var idStringBeginning = "ObjectId("
var idStringEnd = ")"
id := ObjectIdRegEx.FindString(objectId)
if id == "" {
fmt.Println("Error in reformatObjectId")
return ""
}
return idStringBeginning + id + idStringEnd
}
func randomMessage(s *Server) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Println("Begin randomMessage")
// Construct aggregation "pipeline" to return 1 random document from entire collection
pipeline := []bson.D{bson.D{{"$sample", bson.D{{"size", 1}}}}}
cursor, _ := s.col.Aggregate(context.Background(), pipeline)
var result Message
for cursor.Next(context.Background()) {
cursorErr := cursor.Decode(&result)
if cursorErr != nil {
log.Fatal("Error in random() cursor")
}
fmt.Println("Result: ", result)
}
if checkForVideo(result) {
randomMessage(s)
}
retMessage := craftReturnMessage(result)
jsonResult, _ := json.Marshal(retMessage)
w.Write(jsonResult)
fmt.Println("End randomMessage")
})
}
func createEmptyServerResponseWithError(err ErrorCode) ServerResponse {
return ServerResponse{
Error: err,
MessageResults: Messages{},
LastID: ""}
}
// First string = sender
// Second string = startingId (if any)
// If ServerResponse != nil -> Return it, because we have an error
func getPagedQueryTerms(r *http.Request) (string, string, ServerResponse) {
query := r.URL.Query()
if len(query) == 0 {
responseObject := createEmptyServerResponseWithError(MalformedPagedBySenderURL)
return "", "", responseObject
}
senderQ := query["sender"]
if len(senderQ) == 0 {
responseObject := createEmptyServerResponseWithError(SenderEmpty)
return "", "", responseObject
}
sender := senderQ[0]
if sender == "" {
responseObject := createEmptyServerResponseWithError(SenderEmpty)
return "", "", responseObject
}
startingIdQ := query["startAt"]
var startingId string
if len(startingIdQ) == 0 {
startingId = ""
} else {
startingId = startingIdQ[0]
}
return sender, startingId, ServerResponse{}
}
func encryptLastId(lastId string) string {
fmt.Println("Beginning encryptLastId()")
// Generate AES cipher with 32 byte passphrase
aesCipher, err := aes.NewCipher([]byte(KeyPassPhrase))
if err != nil {
fmt.Println("Error in encryptLastId(): ", err)
}
// GCM "Galois/Counter Mode": Symmetric Keyy cryptographic block cipher
gcm, err := cipher.NewGCM(aesCipher)
if err != nil {
fmt.Println("Error in encryptLastId(): ", err)
}
// Nonce is literally a "one off" byte array which will be populated by a random sequence below.
// The nonce is prepended/appended to the cipher (?) and is used in deciphering
nonce := make([]byte, gcm.NonceSize())
if _, err = io.ReadFull(rand.Reader, nonce); err != nil {
fmt.Println("Error in io.ReadFull: ", err)
}
encryptedByteArray := gcm.Seal(nonce, nonce, []byte(lastId), nil)
// Convert to Base64 to ensure we can transmit via HTTP without error or corruption
encryptedString := base64.StdEncoding.EncodeToString(encryptedByteArray)
fmt.Println("Ending encryptLastId()")
return encryptedString
}
func decryptLastId(encLastId string) string {
fmt.Println("Beginning decryptLastId()")
encLastIdByteArray, err := base64.StdEncoding.DecodeString(encLastId)
if err != nil {
fmt.Println("Error in StdEncoding.DecodeString: ", err)
}
aesCipher, err := aes.NewCipher([]byte(KeyPassPhrase))
if err != nil {
fmt.Println("Error in decryptLastId(): ", err)
}
gcm, err := cipher.NewGCM(aesCipher)
if err != nil {
fmt.Println("Error in encryptLastId(): ", err)
}
nonceSize := gcm.NonceSize()
nonce, cipherText := encLastIdByteArray[:nonceSize], encLastIdByteArray[nonceSize:]
decryptedLastId, err := gcm.Open(nil, []byte(nonce), []byte(cipherText), nil)
if err != nil {
fmt.Println("Error in gcm.Open: ", err)
}
fmt.Println("Ending decryptLastId()")
return string(decryptedLastId)
}
func pagedMessagesLogic(s *Server, r *http.Request) ServerResponse {
fmt.Println("Begin pagedMessagesBySender()")
maxItems := 10
sender, startingId, err := getPagedQueryTerms(r)
if err.Error != "" {
return err
}
fmt.Println("StartingID: ", startingId)
fmt.Println("Sender: ", sender)
// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}}
// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159}
// pipeline := []bson.D{bson.D{{"$match", {bson.D{{"sender", sender}}}}}, bson.D{{"$limit", maxItems}}}
// pipeline := []bson.M{bson.M{"$match": bson.M{"sender": sender, "_id": bson.M{"$gt": startingId}}}, bson.M{"$limit": maxItems}}
pipeline := pagedPipelineBuilder(sender, startingId, maxItems)
cursor, _ := s.col.Aggregate(context.Background(), pipeline)
var messageBatch Messages
var result Message
var rawId bson.RawValue
for cursor.Next(context.Background()) {
cursorErr := cursor.Decode(&result)
if cursorErr != nil {
log.Fatal("Error in pagedMessagesBySender() cursor: ", cursorErr)
}
messageBatch.Messages = append(messageBatch.Messages, result)
rawId = cursor.Current.Lookup("_id")
}
lastId := stringFromRawValue(rawId)
encryptedLastId := encryptLastId(lastId)
serverResponse := ServerResponse{
MessageResults: messageBatch,
Error: "",
LastID: encryptedLastId}
return serverResponse
}
func stringFromRawValue(rawId bson.RawValue) string {
objectID := rawId.ObjectID().String()
lastId := strings.Split(objectID, "\"")
return lastId[1]
}
func pagedPipelineBuilder(sender string, startingId string, limit int) []bson.M {
//pipeline := []bson.M{bson.M{"$match": bson.M{"sender": sender, "_id": bson.M{"$gt": startingId}}}, bson.M{"$limit": maxItems}}
matchElement := matchPipelineBuilder(sender, startingId)
limitElement := bson.M{"$limit": limit}
pipeline := []bson.M{matchElement, limitElement}
return pipeline
}
func matchPipelineBuilder(sender string, startingId string) bson.M {
matchRoot := bson.M{"$match": ""}
senderElement := bson.M{"sender": sender}
idElement := bson.M{"_id": ""}
gtElement := bson.M{"$gt": ""}
if startingId == "" {
matchRoot["$match"] = senderElement
} else {
gtElement["$gt"] = startingId
idElement["_id"] = gtElement
tempArray := []bson.M{senderElement, idElement}
matchRoot["$match"] = tempArray
}
fmt.Println("REturning matchroot: ", matchRoot)
return matchRoot
}
func pagedMessagesBySender(s *Server) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
returnObject := pagedMessagesLogic(s, r)
returnJson, err := json.Marshal(returnObject)
if err != nil {
fmt.Println("Error converted pagedMessagesLogic() response to JSON: ", err)
}
w.Write(returnJson)
fmt.Println("End pagedMessagesBySender()")
})
}
func craftReturnMessage(objIn Message) ReturnMessage {
objIn.Photos = handleMediaPath(objIn.Photos)
newMessage := ReturnMessage{
Sender: objIn.Sender,
Content: objIn.Content,
Timestamp: objIn.Timestamp,
Share: objIn.Share,
Reactions: objIn.Reactions,
}
if len(objIn.Photos) > 0 {
newMessage.Photo = objIn.Photos[0]
}
return newMessage
}
func capitalizeName(name string) string {
return strings.Title(name)
}
func checkForVideo(obj Message) bool {
fmt.Println("in check for video")
if obj.Photos == nil {
return false
}
path := obj.Photos[0].Uri
ext := ".mp4"
fmt.Println("Path: ", path)
fmt.Println(strings.Contains(path, ext))
return strings.Contains(path, ext)
}
func handleMediaPath(origPhotos []Photo) []Photo {
if origPhotos == nil {
return origPhotos
}
if origPhotos[0].Uri == "" {
return origPhotos
}
path := &origPhotos[0].Uri
videos := "/videos/"
photos := "/photos/"
gifs := "/gifs/"
if strings.Contains(*path, videos) {
*path = stripVideoPath(*path)
}
if strings.Contains(*path, photos) {
*path = stripPhotoPath(*path)
}
if strings.Contains(*path, gifs) {
*path = stripGifPath(*path)
}
return origPhotos
}
func stripVideoPath(path string) string {
videoIndex := strings.Index(path, "/videos/")
return path[videoIndex:]
}
func | (path string) string {
splitString := strings.SplitAfter(path, "/photos/")
return splitString[len(splitString)-1]
}
func stripGifPath(path string) string {
gifIndex := strings.Index(path, "/gifs/")
return path[gifIndex:]
}
func allMessagesBySender(s *Server) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
q := r.URL.Query()["participant"]
sender := capitalizeName(q[0])
pipeline := []bson.D{
bson.D{{"$match", bson.D{{"sender", sender}}}},
bson.D{{"$sort", bson.D{{"timestamp", -1}}}}}
cursor, _ := s.col.Aggregate(context.Background(), pipeline)
var allMessages Messages
for cursor.Next(context.Background()) {
var result Message
cursorErr := cursor.Decode(&result)
if cursorErr != nil {
log.Fatal("Error in random() cursor")
}
allMessages.Messages = append(allMessages.Messages, result)
}
jAllMessages, _ := json.Marshal(allMessages)
w.Write(jAllMessages)
})
}
func randomMessageBySender(s *Server) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
q := r.URL.Query()["participant"]
sender := capitalizeName(q[0])
pipeline := []bson.D{
bson.D{{"$match", bson.D{{"sender", sender}}}},
bson.D{{"$sample", bson.D{{"size", 1}}}}}
cursor, _ := s.col.Aggregate(context.Background(), pipeline)
var allMessages Messages
for cursor.Next(context.Background()) {
var result Message
cursorErr := cursor.Decode(&result)
if cursorErr != nil {
log.Fatal("Error in random() cursor")
}
allMessages.Messages = append(allMessages.Messages, result)
}
jAllMessages, _ := json.Marshal(allMessages)
w.Write(jAllMessages)
})
}
func getPort() string {
port := os.Getenv("PORT")
if port == "" {
port = "8080" // Google services wants 8080 or will decide for us.
log.Printf("Defaulting to port %s", port)
}
return port
}
func main() {
//mongoURI := "mongodb://localhost:27017"
mongoURI := "mongodb+srv://kak:ricosuave@kak-6wzzo.gcp.mongodb.net/test?retryWrites=true&w=majority"
//client, cancel := mongolib.ConnectToMongoDB(mongoURI)
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
client, err := mongo.Connect(ctx, options.Client().ApplyURI(mongoURI))
if err != nil {
fmt.Println("Error connecting to mongo DB: ", err)
}
defer cancel()
//collection := mongolib.GetMongoCollection(client, "nebrasketball", "messages")
collection := client.Database("nebrasketball").Collection("messages")
server := &Server{db: client, col: collection}
http.Handle("/", http.FileServer(http.Dir("./static")))
http.Handle("/random", randomMessage(server))
http.Handle("/randsender", randomMessageBySender(server))
http.Handle("/getallfromsender", allMessagesBySender(server))
http.Handle("/getpagedfromsender", pagedMessagesBySender(server))
port := getPort()
log.Printf("Listening on port %s", port)
if err := http.ListenAndServe(":"+port, nil); err != nil {
log.Fatal(err)
}
}
| stripPhotoPath | identifier_name |
main.go | package main
import (
"context"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"os"
"regexp"
"strings"
"time"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
type Server struct {
db *mongo.Client
col *mongo.Collection
}
type Participants struct {
Name string
}
type Reaction struct {
Reaction string
Actor string
}
type Photo struct {
Uri string
Creation int
}
type Share struct {
Link string
}
type Messages struct {
Messages []Message `json:"messages"`
}
type Message struct {
Sender string
Timestamp int
Content string
Photos []Photo
Reactions []Reaction
Share Share
Type string
}
type ReturnMessage struct {
Sender string
Timestamp int
Content string
Photo Photo
Reactions []Reaction
Share Share
Type string
}
type ServerResponse struct {
MessageResults Messages
Error ErrorCode
LastID string
}
type ErrorCode string
const (
KeyPassPhrase string = "fjklj4kj12414980a9fasdvklavn!@$1"
MalformedPagedBySenderURL ErrorCode = "URL should look like '...?sender=example%20name&startAt=a8890ef6b...'"
SenderEmpty ErrorCode = "URL 'sender' parameter is empty"
)
// ObjectIdRegEx Only grabs alphanumeric ID and quotes between ObjectID()
var ObjectIdRegEx = regexp.MustCompile(`"(.*?)"`)
func reformatObjectId(objectId string) string {
fmt.Println("objectID passed in: ", objectId)
var idStringBeginning = "ObjectId("
var idStringEnd = ")"
id := ObjectIdRegEx.FindString(objectId)
if id == "" {
fmt.Println("Error in reformatObjectId")
return ""
}
return idStringBeginning + id + idStringEnd
}
func randomMessage(s *Server) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Println("Begin randomMessage")
// Construct aggregation "pipeline" to return 1 random document from entire collection
pipeline := []bson.D{bson.D{{"$sample", bson.D{{"size", 1}}}}}
cursor, _ := s.col.Aggregate(context.Background(), pipeline)
var result Message
for cursor.Next(context.Background()) {
cursorErr := cursor.Decode(&result)
if cursorErr != nil {
log.Fatal("Error in random() cursor")
}
fmt.Println("Result: ", result)
}
if checkForVideo(result) {
randomMessage(s)
}
retMessage := craftReturnMessage(result)
jsonResult, _ := json.Marshal(retMessage)
w.Write(jsonResult)
fmt.Println("End randomMessage")
})
}
func createEmptyServerResponseWithError(err ErrorCode) ServerResponse {
return ServerResponse{
Error: err,
MessageResults: Messages{},
LastID: ""}
}
// First string = sender
// Second string = startingId (if any)
// If ServerResponse != nil -> Return it, because we have an error
func getPagedQueryTerms(r *http.Request) (string, string, ServerResponse) {
query := r.URL.Query()
if len(query) == 0 {
responseObject := createEmptyServerResponseWithError(MalformedPagedBySenderURL)
return "", "", responseObject
}
senderQ := query["sender"]
if len(senderQ) == 0 {
responseObject := createEmptyServerResponseWithError(SenderEmpty)
return "", "", responseObject
}
sender := senderQ[0]
if sender == "" {
responseObject := createEmptyServerResponseWithError(SenderEmpty)
return "", "", responseObject
}
startingIdQ := query["startAt"]
var startingId string
if len(startingIdQ) == 0 {
startingId = ""
} else {
startingId = startingIdQ[0]
}
return sender, startingId, ServerResponse{}
}
func encryptLastId(lastId string) string {
fmt.Println("Beginning encryptLastId()")
// Generate AES cipher with 32 byte passphrase
aesCipher, err := aes.NewCipher([]byte(KeyPassPhrase))
if err != nil {
fmt.Println("Error in encryptLastId(): ", err)
}
// GCM "Galois/Counter Mode": Symmetric Keyy cryptographic block cipher
gcm, err := cipher.NewGCM(aesCipher)
if err != nil {
fmt.Println("Error in encryptLastId(): ", err)
}
// Nonce is literally a "one off" byte array which will be populated by a random sequence below.
// The nonce is prepended/appended to the cipher (?) and is used in deciphering
nonce := make([]byte, gcm.NonceSize())
if _, err = io.ReadFull(rand.Reader, nonce); err != nil {
fmt.Println("Error in io.ReadFull: ", err)
}
encryptedByteArray := gcm.Seal(nonce, nonce, []byte(lastId), nil)
// Convert to Base64 to ensure we can transmit via HTTP without error or corruption
encryptedString := base64.StdEncoding.EncodeToString(encryptedByteArray)
fmt.Println("Ending encryptLastId()")
return encryptedString
}
func decryptLastId(encLastId string) string {
fmt.Println("Beginning decryptLastId()")
encLastIdByteArray, err := base64.StdEncoding.DecodeString(encLastId)
if err != nil {
fmt.Println("Error in StdEncoding.DecodeString: ", err)
}
aesCipher, err := aes.NewCipher([]byte(KeyPassPhrase))
if err != nil {
fmt.Println("Error in decryptLastId(): ", err)
}
gcm, err := cipher.NewGCM(aesCipher)
if err != nil {
fmt.Println("Error in encryptLastId(): ", err)
}
nonceSize := gcm.NonceSize()
nonce, cipherText := encLastIdByteArray[:nonceSize], encLastIdByteArray[nonceSize:]
decryptedLastId, err := gcm.Open(nil, []byte(nonce), []byte(cipherText), nil)
if err != nil {
fmt.Println("Error in gcm.Open: ", err)
}
fmt.Println("Ending decryptLastId()")
return string(decryptedLastId)
}
func pagedMessagesLogic(s *Server, r *http.Request) ServerResponse {
fmt.Println("Begin pagedMessagesBySender()")
maxItems := 10
sender, startingId, err := getPagedQueryTerms(r)
if err.Error != "" {
return err
}
fmt.Println("StartingID: ", startingId)
fmt.Println("Sender: ", sender)
// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}}
// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159}
// pipeline := []bson.D{bson.D{{"$match", {bson.D{{"sender", sender}}}}}, bson.D{{"$limit", maxItems}}}
// pipeline := []bson.M{bson.M{"$match": bson.M{"sender": sender, "_id": bson.M{"$gt": startingId}}}, bson.M{"$limit": maxItems}}
pipeline := pagedPipelineBuilder(sender, startingId, maxItems)
cursor, _ := s.col.Aggregate(context.Background(), pipeline)
var messageBatch Messages
var result Message
var rawId bson.RawValue
for cursor.Next(context.Background()) {
cursorErr := cursor.Decode(&result)
if cursorErr != nil {
log.Fatal("Error in pagedMessagesBySender() cursor: ", cursorErr)
}
messageBatch.Messages = append(messageBatch.Messages, result)
rawId = cursor.Current.Lookup("_id")
}
lastId := stringFromRawValue(rawId)
encryptedLastId := encryptLastId(lastId)
serverResponse := ServerResponse{
MessageResults: messageBatch,
Error: "",
LastID: encryptedLastId}
return serverResponse
}
func stringFromRawValue(rawId bson.RawValue) string {
objectID := rawId.ObjectID().String()
lastId := strings.Split(objectID, "\"")
return lastId[1]
}
func pagedPipelineBuilder(sender string, startingId string, limit int) []bson.M {
//pipeline := []bson.M{bson.M{"$match": bson.M{"sender": sender, "_id": bson.M{"$gt": startingId}}}, bson.M{"$limit": maxItems}}
matchElement := matchPipelineBuilder(sender, startingId)
limitElement := bson.M{"$limit": limit}
pipeline := []bson.M{matchElement, limitElement}
return pipeline
}
func matchPipelineBuilder(sender string, startingId string) bson.M {
matchRoot := bson.M{"$match": ""}
senderElement := bson.M{"sender": sender}
idElement := bson.M{"_id": ""}
gtElement := bson.M{"$gt": ""}
if startingId == "" {
matchRoot["$match"] = senderElement
} else {
gtElement["$gt"] = startingId
idElement["_id"] = gtElement
tempArray := []bson.M{senderElement, idElement}
matchRoot["$match"] = tempArray
}
fmt.Println("REturning matchroot: ", matchRoot)
return matchRoot
}
func pagedMessagesBySender(s *Server) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
returnObject := pagedMessagesLogic(s, r)
returnJson, err := json.Marshal(returnObject)
if err != nil {
fmt.Println("Error converted pagedMessagesLogic() response to JSON: ", err)
}
w.Write(returnJson)
fmt.Println("End pagedMessagesBySender()")
})
}
func craftReturnMessage(objIn Message) ReturnMessage {
objIn.Photos = handleMediaPath(objIn.Photos)
newMessage := ReturnMessage{
Sender: objIn.Sender,
Content: objIn.Content,
Timestamp: objIn.Timestamp,
Share: objIn.Share,
Reactions: objIn.Reactions,
}
if len(objIn.Photos) > 0 {
newMessage.Photo = objIn.Photos[0]
}
return newMessage
}
func capitalizeName(name string) string {
return strings.Title(name)
}
func checkForVideo(obj Message) bool {
fmt.Println("in check for video")
if obj.Photos == nil {
return false
}
path := obj.Photos[0].Uri
ext := ".mp4"
fmt.Println("Path: ", path)
fmt.Println(strings.Contains(path, ext))
return strings.Contains(path, ext)
}
func handleMediaPath(origPhotos []Photo) []Photo {
if origPhotos == nil {
return origPhotos
}
if origPhotos[0].Uri == "" {
return origPhotos
}
path := &origPhotos[0].Uri
videos := "/videos/"
photos := "/photos/"
gifs := "/gifs/"
if strings.Contains(*path, videos) {
*path = stripVideoPath(*path)
}
if strings.Contains(*path, photos) {
*path = stripPhotoPath(*path)
}
if strings.Contains(*path, gifs) {
*path = stripGifPath(*path)
}
return origPhotos
}
func stripVideoPath(path string) string {
videoIndex := strings.Index(path, "/videos/")
return path[videoIndex:]
}
func stripPhotoPath(path string) string |
func stripGifPath(path string) string {
gifIndex := strings.Index(path, "/gifs/")
return path[gifIndex:]
}
func allMessagesBySender(s *Server) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
q := r.URL.Query()["participant"]
sender := capitalizeName(q[0])
pipeline := []bson.D{
bson.D{{"$match", bson.D{{"sender", sender}}}},
bson.D{{"$sort", bson.D{{"timestamp", -1}}}}}
cursor, _ := s.col.Aggregate(context.Background(), pipeline)
var allMessages Messages
for cursor.Next(context.Background()) {
var result Message
cursorErr := cursor.Decode(&result)
if cursorErr != nil {
log.Fatal("Error in random() cursor")
}
allMessages.Messages = append(allMessages.Messages, result)
}
jAllMessages, _ := json.Marshal(allMessages)
w.Write(jAllMessages)
})
}
func randomMessageBySender(s *Server) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
q := r.URL.Query()["participant"]
sender := capitalizeName(q[0])
pipeline := []bson.D{
bson.D{{"$match", bson.D{{"sender", sender}}}},
bson.D{{"$sample", bson.D{{"size", 1}}}}}
cursor, _ := s.col.Aggregate(context.Background(), pipeline)
var allMessages Messages
for cursor.Next(context.Background()) {
var result Message
cursorErr := cursor.Decode(&result)
if cursorErr != nil {
log.Fatal("Error in random() cursor")
}
allMessages.Messages = append(allMessages.Messages, result)
}
jAllMessages, _ := json.Marshal(allMessages)
w.Write(jAllMessages)
})
}
func getPort() string {
port := os.Getenv("PORT")
if port == "" {
port = "8080" // Google services wants 8080 or will decide for us.
log.Printf("Defaulting to port %s", port)
}
return port
}
func main() {
//mongoURI := "mongodb://localhost:27017"
mongoURI := "mongodb+srv://kak:ricosuave@kak-6wzzo.gcp.mongodb.net/test?retryWrites=true&w=majority"
//client, cancel := mongolib.ConnectToMongoDB(mongoURI)
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
client, err := mongo.Connect(ctx, options.Client().ApplyURI(mongoURI))
if err != nil {
fmt.Println("Error connecting to mongo DB: ", err)
}
defer cancel()
//collection := mongolib.GetMongoCollection(client, "nebrasketball", "messages")
collection := client.Database("nebrasketball").Collection("messages")
server := &Server{db: client, col: collection}
http.Handle("/", http.FileServer(http.Dir("./static")))
http.Handle("/random", randomMessage(server))
http.Handle("/randsender", randomMessageBySender(server))
http.Handle("/getallfromsender", allMessagesBySender(server))
http.Handle("/getpagedfromsender", pagedMessagesBySender(server))
port := getPort()
log.Printf("Listening on port %s", port)
if err := http.ListenAndServe(":"+port, nil); err != nil {
log.Fatal(err)
}
}
| {
splitString := strings.SplitAfter(path, "/photos/")
return splitString[len(splitString)-1]
} | identifier_body |
main.go | package main
import (
"context"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"os"
"regexp"
"strings"
"time"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
type Server struct {
db *mongo.Client
col *mongo.Collection
}
type Participants struct {
Name string
}
type Reaction struct {
Reaction string
Actor string
}
type Photo struct {
Uri string
Creation int
}
type Share struct {
Link string
}
type Messages struct {
Messages []Message `json:"messages"`
}
type Message struct {
Sender string
Timestamp int
Content string
Photos []Photo
Reactions []Reaction
Share Share
Type string
}
type ReturnMessage struct {
Sender string
Timestamp int
Content string
Photo Photo
Reactions []Reaction
Share Share
Type string
}
type ServerResponse struct {
MessageResults Messages
Error ErrorCode
LastID string
}
type ErrorCode string
const (
KeyPassPhrase string = "fjklj4kj12414980a9fasdvklavn!@$1"
MalformedPagedBySenderURL ErrorCode = "URL should look like '...?sender=example%20name&startAt=a8890ef6b...'"
SenderEmpty ErrorCode = "URL 'sender' parameter is empty"
)
// ObjectIdRegEx Only grabs alphanumeric ID and quotes between ObjectID()
var ObjectIdRegEx = regexp.MustCompile(`"(.*?)"`)
func reformatObjectId(objectId string) string {
fmt.Println("objectID passed in: ", objectId)
var idStringBeginning = "ObjectId("
var idStringEnd = ")"
id := ObjectIdRegEx.FindString(objectId)
if id == "" {
fmt.Println("Error in reformatObjectId")
return ""
}
return idStringBeginning + id + idStringEnd
}
func randomMessage(s *Server) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Println("Begin randomMessage")
// Construct aggregation "pipeline" to return 1 random document from entire collection
pipeline := []bson.D{bson.D{{"$sample", bson.D{{"size", 1}}}}}
cursor, _ := s.col.Aggregate(context.Background(), pipeline)
var result Message
for cursor.Next(context.Background()) {
cursorErr := cursor.Decode(&result)
if cursorErr != nil {
log.Fatal("Error in random() cursor")
}
fmt.Println("Result: ", result)
}
if checkForVideo(result) {
randomMessage(s)
}
retMessage := craftReturnMessage(result)
jsonResult, _ := json.Marshal(retMessage)
w.Write(jsonResult)
fmt.Println("End randomMessage")
})
}
func createEmptyServerResponseWithError(err ErrorCode) ServerResponse {
return ServerResponse{
Error: err,
MessageResults: Messages{},
LastID: ""}
}
// First string = sender
// Second string = startingId (if any)
// If ServerResponse != nil -> Return it, because we have an error
func getPagedQueryTerms(r *http.Request) (string, string, ServerResponse) {
query := r.URL.Query()
if len(query) == 0 {
responseObject := createEmptyServerResponseWithError(MalformedPagedBySenderURL)
return "", "", responseObject
}
senderQ := query["sender"] | }
sender := senderQ[0]
if sender == "" {
responseObject := createEmptyServerResponseWithError(SenderEmpty)
return "", "", responseObject
}
startingIdQ := query["startAt"]
var startingId string
if len(startingIdQ) == 0 {
startingId = ""
} else {
startingId = startingIdQ[0]
}
return sender, startingId, ServerResponse{}
}
func encryptLastId(lastId string) string {
fmt.Println("Beginning encryptLastId()")
// Generate AES cipher with 32 byte passphrase
aesCipher, err := aes.NewCipher([]byte(KeyPassPhrase))
if err != nil {
fmt.Println("Error in encryptLastId(): ", err)
}
// GCM "Galois/Counter Mode": Symmetric Keyy cryptographic block cipher
gcm, err := cipher.NewGCM(aesCipher)
if err != nil {
fmt.Println("Error in encryptLastId(): ", err)
}
// Nonce is literally a "one off" byte array which will be populated by a random sequence below.
// The nonce is prepended/appended to the cipher (?) and is used in deciphering
nonce := make([]byte, gcm.NonceSize())
if _, err = io.ReadFull(rand.Reader, nonce); err != nil {
fmt.Println("Error in io.ReadFull: ", err)
}
encryptedByteArray := gcm.Seal(nonce, nonce, []byte(lastId), nil)
// Convert to Base64 to ensure we can transmit via HTTP without error or corruption
encryptedString := base64.StdEncoding.EncodeToString(encryptedByteArray)
fmt.Println("Ending encryptLastId()")
return encryptedString
}
func decryptLastId(encLastId string) string {
fmt.Println("Beginning decryptLastId()")
encLastIdByteArray, err := base64.StdEncoding.DecodeString(encLastId)
if err != nil {
fmt.Println("Error in StdEncoding.DecodeString: ", err)
}
aesCipher, err := aes.NewCipher([]byte(KeyPassPhrase))
if err != nil {
fmt.Println("Error in decryptLastId(): ", err)
}
gcm, err := cipher.NewGCM(aesCipher)
if err != nil {
fmt.Println("Error in encryptLastId(): ", err)
}
nonceSize := gcm.NonceSize()
nonce, cipherText := encLastIdByteArray[:nonceSize], encLastIdByteArray[nonceSize:]
decryptedLastId, err := gcm.Open(nil, []byte(nonce), []byte(cipherText), nil)
if err != nil {
fmt.Println("Error in gcm.Open: ", err)
}
fmt.Println("Ending decryptLastId()")
return string(decryptedLastId)
}
func pagedMessagesLogic(s *Server, r *http.Request) ServerResponse {
fmt.Println("Begin pagedMessagesBySender()")
maxItems := 10
sender, startingId, err := getPagedQueryTerms(r)
if err.Error != "" {
return err
}
fmt.Println("StartingID: ", startingId)
fmt.Println("Sender: ", sender)
// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}}
// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159}
// pipeline := []bson.D{bson.D{{"$match", {bson.D{{"sender", sender}}}}}, bson.D{{"$limit", maxItems}}}
// pipeline := []bson.M{bson.M{"$match": bson.M{"sender": sender, "_id": bson.M{"$gt": startingId}}}, bson.M{"$limit": maxItems}}
pipeline := pagedPipelineBuilder(sender, startingId, maxItems)
cursor, _ := s.col.Aggregate(context.Background(), pipeline)
var messageBatch Messages
var result Message
var rawId bson.RawValue
for cursor.Next(context.Background()) {
cursorErr := cursor.Decode(&result)
if cursorErr != nil {
log.Fatal("Error in pagedMessagesBySender() cursor: ", cursorErr)
}
messageBatch.Messages = append(messageBatch.Messages, result)
rawId = cursor.Current.Lookup("_id")
}
lastId := stringFromRawValue(rawId)
encryptedLastId := encryptLastId(lastId)
serverResponse := ServerResponse{
MessageResults: messageBatch,
Error: "",
LastID: encryptedLastId}
return serverResponse
}
func stringFromRawValue(rawId bson.RawValue) string {
objectID := rawId.ObjectID().String()
lastId := strings.Split(objectID, "\"")
return lastId[1]
}
func pagedPipelineBuilder(sender string, startingId string, limit int) []bson.M {
//pipeline := []bson.M{bson.M{"$match": bson.M{"sender": sender, "_id": bson.M{"$gt": startingId}}}, bson.M{"$limit": maxItems}}
matchElement := matchPipelineBuilder(sender, startingId)
limitElement := bson.M{"$limit": limit}
pipeline := []bson.M{matchElement, limitElement}
return pipeline
}
func matchPipelineBuilder(sender string, startingId string) bson.M {
matchRoot := bson.M{"$match": ""}
senderElement := bson.M{"sender": sender}
idElement := bson.M{"_id": ""}
gtElement := bson.M{"$gt": ""}
if startingId == "" {
matchRoot["$match"] = senderElement
} else {
gtElement["$gt"] = startingId
idElement["_id"] = gtElement
tempArray := []bson.M{senderElement, idElement}
matchRoot["$match"] = tempArray
}
fmt.Println("REturning matchroot: ", matchRoot)
return matchRoot
}
func pagedMessagesBySender(s *Server) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
returnObject := pagedMessagesLogic(s, r)
returnJson, err := json.Marshal(returnObject)
if err != nil {
fmt.Println("Error converted pagedMessagesLogic() response to JSON: ", err)
}
w.Write(returnJson)
fmt.Println("End pagedMessagesBySender()")
})
}
func craftReturnMessage(objIn Message) ReturnMessage {
objIn.Photos = handleMediaPath(objIn.Photos)
newMessage := ReturnMessage{
Sender: objIn.Sender,
Content: objIn.Content,
Timestamp: objIn.Timestamp,
Share: objIn.Share,
Reactions: objIn.Reactions,
}
if len(objIn.Photos) > 0 {
newMessage.Photo = objIn.Photos[0]
}
return newMessage
}
func capitalizeName(name string) string {
return strings.Title(name)
}
func checkForVideo(obj Message) bool {
fmt.Println("in check for video")
if obj.Photos == nil {
return false
}
path := obj.Photos[0].Uri
ext := ".mp4"
fmt.Println("Path: ", path)
fmt.Println(strings.Contains(path, ext))
return strings.Contains(path, ext)
}
func handleMediaPath(origPhotos []Photo) []Photo {
if origPhotos == nil {
return origPhotos
}
if origPhotos[0].Uri == "" {
return origPhotos
}
path := &origPhotos[0].Uri
videos := "/videos/"
photos := "/photos/"
gifs := "/gifs/"
if strings.Contains(*path, videos) {
*path = stripVideoPath(*path)
}
if strings.Contains(*path, photos) {
*path = stripPhotoPath(*path)
}
if strings.Contains(*path, gifs) {
*path = stripGifPath(*path)
}
return origPhotos
}
func stripVideoPath(path string) string {
videoIndex := strings.Index(path, "/videos/")
return path[videoIndex:]
}
func stripPhotoPath(path string) string {
splitString := strings.SplitAfter(path, "/photos/")
return splitString[len(splitString)-1]
}
func stripGifPath(path string) string {
gifIndex := strings.Index(path, "/gifs/")
return path[gifIndex:]
}
func allMessagesBySender(s *Server) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
q := r.URL.Query()["participant"]
sender := capitalizeName(q[0])
pipeline := []bson.D{
bson.D{{"$match", bson.D{{"sender", sender}}}},
bson.D{{"$sort", bson.D{{"timestamp", -1}}}}}
cursor, _ := s.col.Aggregate(context.Background(), pipeline)
var allMessages Messages
for cursor.Next(context.Background()) {
var result Message
cursorErr := cursor.Decode(&result)
if cursorErr != nil {
log.Fatal("Error in random() cursor")
}
allMessages.Messages = append(allMessages.Messages, result)
}
jAllMessages, _ := json.Marshal(allMessages)
w.Write(jAllMessages)
})
}
func randomMessageBySender(s *Server) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
q := r.URL.Query()["participant"]
sender := capitalizeName(q[0])
pipeline := []bson.D{
bson.D{{"$match", bson.D{{"sender", sender}}}},
bson.D{{"$sample", bson.D{{"size", 1}}}}}
cursor, _ := s.col.Aggregate(context.Background(), pipeline)
var allMessages Messages
for cursor.Next(context.Background()) {
var result Message
cursorErr := cursor.Decode(&result)
if cursorErr != nil {
log.Fatal("Error in random() cursor")
}
allMessages.Messages = append(allMessages.Messages, result)
}
jAllMessages, _ := json.Marshal(allMessages)
w.Write(jAllMessages)
})
}
func getPort() string {
port := os.Getenv("PORT")
if port == "" {
port = "8080" // Google services wants 8080 or will decide for us.
log.Printf("Defaulting to port %s", port)
}
return port
}
func main() {
//mongoURI := "mongodb://localhost:27017"
mongoURI := "mongodb+srv://kak:ricosuave@kak-6wzzo.gcp.mongodb.net/test?retryWrites=true&w=majority"
//client, cancel := mongolib.ConnectToMongoDB(mongoURI)
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
client, err := mongo.Connect(ctx, options.Client().ApplyURI(mongoURI))
if err != nil {
fmt.Println("Error connecting to mongo DB: ", err)
}
defer cancel()
//collection := mongolib.GetMongoCollection(client, "nebrasketball", "messages")
collection := client.Database("nebrasketball").Collection("messages")
server := &Server{db: client, col: collection}
http.Handle("/", http.FileServer(http.Dir("./static")))
http.Handle("/random", randomMessage(server))
http.Handle("/randsender", randomMessageBySender(server))
http.Handle("/getallfromsender", allMessagesBySender(server))
http.Handle("/getpagedfromsender", pagedMessagesBySender(server))
port := getPort()
log.Printf("Listening on port %s", port)
if err := http.ListenAndServe(":"+port, nil); err != nil {
log.Fatal(err)
}
} | if len(senderQ) == 0 {
responseObject := createEmptyServerResponseWithError(SenderEmpty)
return "", "", responseObject | random_line_split |
clientlib-promoengine.js | $(document).ready(function(){if($('[name="isauthor"]').val()=="true"){var b=window.location.href;
if(b.indexOf("preview=true")!=-1){if(b.indexOf("promoPath=")!=-1){var c=b.split("promoPath=")[1];
if(c.indexOf("&")!=-1&&b.indexOf("position=")!=-1){c=c.split("&")[0];
var a=b.split("position=")[1];
if($("body").find(".hcontentcard.parbase").html()!=undefined){hcontentCarddisplay(c,a,"preview")
}}}}}else{if($("body").find(".hcontentcard.parbase").html()!=undefined){hcontentCarddisplay("","","")
}}});
function hcontentCarddisplay(b,e,f){function a(){var l=new Date();
var k=l.getTimezoneOffset();
k=(k/60)*-1;
var j=l.getTime();
if(k!==0){return(j+(3600000*k))
}return j
}var d=$('[name="resolvedPath"]').val();
var h=$('[name="pagePath"]').attr("content");
var g=[];
$("body").find(".hcontentcard.parbase").each(function(){var j=$(this).find("input#promotionalContent").val();
var l=$(this).find("input#promoposition").val();
var k=$(this).find("input#IsMbox").val();
if(j==="true"&&l!=""&&l!=undefined){g.push(l)
}});
var c=$('[name="localeVal"]').val();
if(c==="null"||c===""){var i=d;
if(i.indexOf("/content/vmware/vmware-preview-sites")>-1){i=i.replace("/content/vmware/vmware-preview-sites/","");
c=i.split("/")[0]
}else{if(i.indexOf("/content/vmware/vmware-published-sites")>-1){i=i.replace("/content/vmware/vmware-published-sites/","");
c=i.split("/")[0]
}else{if(h.indexOf("/content/vmware/vmware-preview-sites")>-1){i=h.replace("/content/vmware/vmware-preview-sites/","");
redirect_locale=i.split("/")[0]
}else{if(h.indexOf("/content/vmware/vmware-published-sites")>-1){i=h.replace("/content/vmware/vmware-published-sites/","");
redirect_locale=i.split("/")[0]
}}c=d.split("/")[1];
d=h.replace(redirect_locale,c)
}}}if(d.indexOf("/content/vmware/vmware-preview-sites")>-1){d=d.replace("/content/vmware/vmware-preview-sites","/content/vmware/vmware-published-sites")
}if(g.length>0){$.ajax({url:"/bin/vmware/promotionalcontent",type:"Get",async:true,data:{path:d,promopositionArray:JSON.stringify(g),currentDate:a(),promoPath:b,position:e,preview:f,locale:c}}).done(function(j){if(j.PromoJSon!=undefined){var k=j.PromoJSon;
$("body").find(".hcontentcard.parbase").each(function(){var o=$(this);
o.find(".thumb-container").removeAttr("style");
var n=$(this).find("input#promotionalContent").val();
var l=$(this).find("input#promoposition").val();
var m=$(this).find("input#templateName").val();
if(n==="true"&&promoposition!=""&&promoposition!=undefined){$.each(k,function(A,S){var q=S.isValidPromo;
var J=S.promoPosition!=undefined?S.promoPosition:"";
if(q&&J===l){var t=S.hamBurgerMenu!=undefined?S.hamBurgerMenu:"";
var ac=S.ctaPath!=undefined?S.ctaPath:"";
var D=S.ctaLabel!=undefined?S.ctaLabel:"";
var L=S.ctaLinkTitle!=undefined?S.ctaLinkTitle:"";
var O=S.date!=undefined?S.date:"";
var E=S.promoTitle!=undefined?S.promoTitle:"";
var V=S.promoContent!=undefined?S.promoContent:"";
var w=S.bcvtrue!=undefined?S.bcvtrue:"";
var aa=S.iconval!=undefined?S.iconval:"";
var u=false;
var M=S.altText!=undefined?S.altText:"";
var y=S.playicon?S.playicon:"";
var U=S.windowSelection!=undefined?S.windowSelection:"";
var I=S.zoomIcon!=undefined?S.zoomIcon:"";
var Z=S.largeImageLink!=undefined?S.largeImageLink:"";
var ab=S.imagePath!=undefined?S.imagePath:"";
var B=S.bcvid!=undefined?S.bcvid:"";
var W=S.expandImageCheckbox!=undefined?S.expandImageCheckbox:"";
var P=S.bcduration!=undefined?S.bcduration:"";
var ad=S.twitter!=undefined?S.twitter:"";
var N=S.linkedin!=undefined?S.linkedin:"";
var T=S.googleplus!=undefined?S.googleplus:"";
var Q=S.facebook!=undefined?S.facebook:"";
var G=S.props!=undefined?S.props:"";
var r=S.updatedbody!=undefined?S.updatedbody:"";
var z=S.description!=undefined?S.description:"";
var X=$(o).find(".section-custom").attr("id")!=undefined?$(o).find(".section-custom").attr("id"):"";
var x=$(o).find("#divId").val()!=undefined?$(o).find("#divId").val():"";
if(t===""){t="true"
}var R="";
if(t!=true&&ac===""&&O===""&&(E===""||V==="")){R='<div class="thumb-container withoutHamModule">'
}else{R='<div class="thumb-container">'
}if(w==="false"){if(aa==="fa fa-youtube"){u=true;
var C='<div class="thumb-img alt-background" id="'+x+'" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>";
if(!(y==="false")){var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/play_icon.png" alt="Video Play Icon" /></a>'
}else{var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/dark-play-icon.png" alt="Video Play Icon" /></a>'
}}else{if(aa==="fa fa-video-camera"){var C='<div class="thumb-img alt-background" id="'+x+'" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>";
if(!(y==="false")){var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/play_icon.png" alt="Video Play Icon" /></a>'
}else{var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/dark-play-icon.png" alt="Video Play Icon" /></a>'
}}else{var C='<div class="thumb-img alt-background" id="'+x+'" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>";
if(W=="true"){if(!(I==="false")){C+='<a onclick="return ImageLargeView(this);" largeimagename='+Z+" altText="+M+' href="javascript:void(0)"><i class="fa fa-search-plus img-largeView" aria-hidden="true"></i></a>'
}else{C+='<a onclick="return ImageLargeView(this);" largeimagename='+Z+" altText="+M+' href="javascript:void(0)"><i class="fa fa-search-plus img-largeView dark" aria-hidden="true"></i></a>'
}}}}C+="</div>"
}else{var C='<div class="thumb-img alt-background" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>";
if(B!=null&&(B!="")){if(!(y==="false")){C+=" <a asset-id="+B+' data-element-type="video" href="javascript:void(0);" onclick="return showVideo('+B+');"><img src="/content/dam/digitalmarketing/vmware/global-icons/play_icon.png" alt="Video Play Icon" /></a>'
}else{C+=" <a "+G+" asset-id="+B+' data-element-type="video" href="javascript:void(0);" onclick="return showVideo('+B+');"><img src="/content/dam/digitalmarketing/vmware/global-icons/dark-play-icon.png" alt="Video Play Icon" /></a>'
}}C+="</div>"
}var s='<div class="thumb-details">';
if(w!="false"&&B!=""&&P!=""){s+='<span class="timestamp">'+P+"</span>"
}s+='<div class="col-xs-1 col-md-1 col-sm-1">';
if(aa==="fa support_ico"){aa="fa support_ico"
}else{aa=aa
}s+='<i class="'+aa+'"></i></div>';
s+='<div class="col-xs-10 col-md-10 col-sm-10">';
if(!(t==="true")){var F="no"
}else{var F=""
}s+='<div class="detail-content '+F+'clamp">';
if(E!=""&&V!=""){s+='<h3 class="'+F+'clampingDetail"><span>'+E+"</span></h3>";
s+=r
}else{if(E!=""){s+='<h3 class="'+F+'clampingDetail"><p>'+E+"</p>"
}else{s+=r
}}s+="</div>";
if(ac!=""&&D!=""||O!=""){s+='<div class="cta_module">';
if(ac!=""&&D!=""){if(w!="false"&&B!=""){s+=' <a class="learn_more" asset-id='+B+' data-element-type="video" href="javascript:void(0);" onclick="return showVideo('+B+');">'+D+'<i class="fa fa-angle-double-right inline"></i></a>'
}else{var p=U=="true"?"_self":"_blank";
s+=' <a class="learn_more" title='+L+" target="+p+" href="+ac+">"+D+'<i class="fa fa-angle-double-right inline"></i></a>'
}}if(O!=""){s+='<span class="datestamp">'+O.split("T")[0]+"</span>"
}s+="</div>"
}s+="</div>";
var v="";
if(t==="true"){if(m!="l4enterprise"){s+='<div class="col-xs-1 col-md-1 col-sm-1 cntClk"><i class="fa fa-bars detail-toggle non-product"></i></div>'
}if(m==="l4enterprise") | }s+='<div class="social-block">';
var Y="";
var K="";
var H="";
if(ac!=""||ac.includes("/content/dam/")){Y=ac;
K="redirect"
}else{if(B!=null&&B!=""){Y=B;
H="brightcove"
}else{Y=X
}}if(ad!=""){s+='<a data-share="twitter" href="javascript:void(0);" data-url="'+Y+'" data-redirect="'+K+'" data-brightcove="'+H+'" data-title="'+E+'" data-via="VMware" data-summary = "'+z+'" data-cta-link = "'+z+'"><i class="fa fa-twitter"></i></a>'
}if(Q!=""){s+='<a data-share="facebook" href="javascript:void(0);" data-url="'+Y+'" data-redirect="'+K+'" data-brightcove="'+H+'" data-title="'+E+'" data-image ="'+ab+'" data-summary = "'+z+'"><i class="fa fa-facebook"></i></a>'
}if(N!=""){s+='<a data-share="linkedin" href="javascript:void(0);" data-url="'+Y+'" data-redirect="'+K+'" data-brightcove="'+H+'" data-title="'+E+'" data-image ="'+ab+'" data-summary = "'+z+'"><i class="fa fa-linkedin"></i></a>'
}if(T!=""){s+='<a data-share="google" href="javascript:void(0);" data-url="'+Y+'" data-redirect="'+K+'" data-brightcove="'+H+'" data-title="'+E+'" data-image ="'+ab+'" data-summary = "'+z+'"><i class="fa fa-google-plus"></i></a>'
}s+="</div>";
s+="</div>";
s+="</div>";
R+=C+s;
$(o).find(".thumb-container ").remove();
$(o).find(".col-md-12").append(R)
}})
}})
}}).fail(function(j,l,k){console.log("failed");
$("body").find(".hcontentcard.parbase").each(function(){$(this).find(".thumb-container").removeAttr("style")
})
})
}else{$("body").find(".hcontentcard.parbase").each(function(){$(this).find(".thumb-container").removeAttr("style")
})
}}$("body").bind("DOMNodeInserted",function(){$(this).find(".detail-content p").addClass("clampingDetail");
$(".detail-content").each(function(){if($(this).find("h3").length<1&&$(this).parent(".col-md-10").find(".cta_module").length<1){$(this).addClass("singlePara")
}else{if($(this).find("p").length<1&&$(this).parent(".col-md-10").find(".cta_module").length<1){$(this).addClass("singleHeading")
}else{if($(this).find("h3").length<1){$(this).addClass("onlyPara")
}else{if($(this).find("p").length<1){$(this).addClass("onlyHeading")
}else{if($(this).parent(".col-md-10").find(".cta_module").length<1&&$(this).parent(".col-md-10").parent(".social-block").find(".social-block").length<1){$(this).addClass("onlyContent")
}}}}}})
});
$(window).load(function(){$(window).trigger("resize")
}); | {s+='<div class="col-xs-1 col-md-1 col-sm-1 cntClk"><i class="fa fa-plus-square detail-toggle"></i></div>'
} | conditional_block |
clientlib-promoengine.js | $(document).ready(function(){if($('[name="isauthor"]').val()=="true"){var b=window.location.href;
if(b.indexOf("preview=true")!=-1){if(b.indexOf("promoPath=")!=-1){var c=b.split("promoPath=")[1];
if(c.indexOf("&")!=-1&&b.indexOf("position=")!=-1){c=c.split("&")[0];
var a=b.split("position=")[1];
if($("body").find(".hcontentcard.parbase").html()!=undefined){hcontentCarddisplay(c,a,"preview")
}}}}}else{if($("body").find(".hcontentcard.parbase").html()!=undefined){hcontentCarddisplay("","","")
}}});
function hcontentCarddisplay(b,e,f){function a(){var l=new Date();
var k=l.getTimezoneOffset();
k=(k/60)*-1;
var j=l.getTime();
if(k!==0){return(j+(3600000*k))
}return j
}var d=$('[name="resolvedPath"]').val();
var h=$('[name="pagePath"]').attr("content");
var g=[];
$("body").find(".hcontentcard.parbase").each(function(){var j=$(this).find("input#promotionalContent").val();
var l=$(this).find("input#promoposition").val();
var k=$(this).find("input#IsMbox").val();
if(j==="true"&&l!=""&&l!=undefined){g.push(l)
}});
var c=$('[name="localeVal"]').val();
if(c==="null"||c===""){var i=d;
if(i.indexOf("/content/vmware/vmware-preview-sites")>-1){i=i.replace("/content/vmware/vmware-preview-sites/","");
c=i.split("/")[0]
}else{if(i.indexOf("/content/vmware/vmware-published-sites")>-1){i=i.replace("/content/vmware/vmware-published-sites/","");
c=i.split("/")[0]
}else{if(h.indexOf("/content/vmware/vmware-preview-sites")>-1){i=h.replace("/content/vmware/vmware-preview-sites/","");
redirect_locale=i.split("/")[0]
}else{if(h.indexOf("/content/vmware/vmware-published-sites")>-1){i=h.replace("/content/vmware/vmware-published-sites/","");
redirect_locale=i.split("/")[0]
}}c=d.split("/")[1];
d=h.replace(redirect_locale,c)
}}}if(d.indexOf("/content/vmware/vmware-preview-sites")>-1){d=d.replace("/content/vmware/vmware-preview-sites","/content/vmware/vmware-published-sites")
}if(g.length>0){$.ajax({url:"/bin/vmware/promotionalcontent",type:"Get",async:true,data:{path:d,promopositionArray:JSON.stringify(g),currentDate:a(),promoPath:b,position:e,preview:f,locale:c}}).done(function(j){if(j.PromoJSon!=undefined){var k=j.PromoJSon;
$("body").find(".hcontentcard.parbase").each(function(){var o=$(this);
o.find(".thumb-container").removeAttr("style");
var n=$(this).find("input#promotionalContent").val();
var l=$(this).find("input#promoposition").val();
var m=$(this).find("input#templateName").val();
if(n==="true"&&promoposition!=""&&promoposition!=undefined){$.each(k,function(A,S){var q=S.isValidPromo;
var J=S.promoPosition!=undefined?S.promoPosition:"";
if(q&&J===l){var t=S.hamBurgerMenu!=undefined?S.hamBurgerMenu:"";
var ac=S.ctaPath!=undefined?S.ctaPath:"";
var D=S.ctaLabel!=undefined?S.ctaLabel:"";
var L=S.ctaLinkTitle!=undefined?S.ctaLinkTitle:"";
var O=S.date!=undefined?S.date:"";
var E=S.promoTitle!=undefined?S.promoTitle:"";
var V=S.promoContent!=undefined?S.promoContent:"";
var w=S.bcvtrue!=undefined?S.bcvtrue:"";
var aa=S.iconval!=undefined?S.iconval:"";
var u=false;
var M=S.altText!=undefined?S.altText:"";
var y=S.playicon?S.playicon:"";
var U=S.windowSelection!=undefined?S.windowSelection:"";
var I=S.zoomIcon!=undefined?S.zoomIcon:"";
var Z=S.largeImageLink!=undefined?S.largeImageLink:"";
var ab=S.imagePath!=undefined?S.imagePath:"";
var B=S.bcvid!=undefined?S.bcvid:"";
var W=S.expandImageCheckbox!=undefined?S.expandImageCheckbox:"";
var P=S.bcduration!=undefined?S.bcduration:"";
var ad=S.twitter!=undefined?S.twitter:"";
var N=S.linkedin!=undefined?S.linkedin:"";
var T=S.googleplus!=undefined?S.googleplus:"";
var Q=S.facebook!=undefined?S.facebook:"";
var G=S.props!=undefined?S.props:"";
var r=S.updatedbody!=undefined?S.updatedbody:"";
var z=S.description!=undefined?S.description:"";
var X=$(o).find(".section-custom").attr("id")!=undefined?$(o).find(".section-custom").attr("id"):"";
var x=$(o).find("#divId").val()!=undefined?$(o).find("#divId").val():"";
if(t===""){t="true"
}var R="";
if(t!=true&&ac===""&&O===""&&(E===""||V==="")){R='<div class="thumb-container withoutHamModule">'
}else{R='<div class="thumb-container">'
}if(w==="false"){if(aa==="fa fa-youtube"){u=true;
var C='<div class="thumb-img alt-background" id="'+x+'" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>";
if(!(y==="false")){var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/play_icon.png" alt="Video Play Icon" /></a>' | if(!(y==="false")){var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/play_icon.png" alt="Video Play Icon" /></a>'
}else{var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/dark-play-icon.png" alt="Video Play Icon" /></a>'
}}else{var C='<div class="thumb-img alt-background" id="'+x+'" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>";
if(W=="true"){if(!(I==="false")){C+='<a onclick="return ImageLargeView(this);" largeimagename='+Z+" altText="+M+' href="javascript:void(0)"><i class="fa fa-search-plus img-largeView" aria-hidden="true"></i></a>'
}else{C+='<a onclick="return ImageLargeView(this);" largeimagename='+Z+" altText="+M+' href="javascript:void(0)"><i class="fa fa-search-plus img-largeView dark" aria-hidden="true"></i></a>'
}}}}C+="</div>"
}else{var C='<div class="thumb-img alt-background" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>";
if(B!=null&&(B!="")){if(!(y==="false")){C+=" <a asset-id="+B+' data-element-type="video" href="javascript:void(0);" onclick="return showVideo('+B+');"><img src="/content/dam/digitalmarketing/vmware/global-icons/play_icon.png" alt="Video Play Icon" /></a>'
}else{C+=" <a "+G+" asset-id="+B+' data-element-type="video" href="javascript:void(0);" onclick="return showVideo('+B+');"><img src="/content/dam/digitalmarketing/vmware/global-icons/dark-play-icon.png" alt="Video Play Icon" /></a>'
}}C+="</div>"
}var s='<div class="thumb-details">';
if(w!="false"&&B!=""&&P!=""){s+='<span class="timestamp">'+P+"</span>"
}s+='<div class="col-xs-1 col-md-1 col-sm-1">';
if(aa==="fa support_ico"){aa="fa support_ico"
}else{aa=aa
}s+='<i class="'+aa+'"></i></div>';
s+='<div class="col-xs-10 col-md-10 col-sm-10">';
if(!(t==="true")){var F="no"
}else{var F=""
}s+='<div class="detail-content '+F+'clamp">';
if(E!=""&&V!=""){s+='<h3 class="'+F+'clampingDetail"><span>'+E+"</span></h3>";
s+=r
}else{if(E!=""){s+='<h3 class="'+F+'clampingDetail"><p>'+E+"</p>"
}else{s+=r
}}s+="</div>";
if(ac!=""&&D!=""||O!=""){s+='<div class="cta_module">';
if(ac!=""&&D!=""){if(w!="false"&&B!=""){s+=' <a class="learn_more" asset-id='+B+' data-element-type="video" href="javascript:void(0);" onclick="return showVideo('+B+');">'+D+'<i class="fa fa-angle-double-right inline"></i></a>'
}else{var p=U=="true"?"_self":"_blank";
s+=' <a class="learn_more" title='+L+" target="+p+" href="+ac+">"+D+'<i class="fa fa-angle-double-right inline"></i></a>'
}}if(O!=""){s+='<span class="datestamp">'+O.split("T")[0]+"</span>"
}s+="</div>"
}s+="</div>";
var v="";
if(t==="true"){if(m!="l4enterprise"){s+='<div class="col-xs-1 col-md-1 col-sm-1 cntClk"><i class="fa fa-bars detail-toggle non-product"></i></div>'
}if(m==="l4enterprise"){s+='<div class="col-xs-1 col-md-1 col-sm-1 cntClk"><i class="fa fa-plus-square detail-toggle"></i></div>'
}}s+='<div class="social-block">';
var Y="";
var K="";
var H="";
if(ac!=""||ac.includes("/content/dam/")){Y=ac;
K="redirect"
}else{if(B!=null&&B!=""){Y=B;
H="brightcove"
}else{Y=X
}}if(ad!=""){s+='<a data-share="twitter" href="javascript:void(0);" data-url="'+Y+'" data-redirect="'+K+'" data-brightcove="'+H+'" data-title="'+E+'" data-via="VMware" data-summary = "'+z+'" data-cta-link = "'+z+'"><i class="fa fa-twitter"></i></a>'
}if(Q!=""){s+='<a data-share="facebook" href="javascript:void(0);" data-url="'+Y+'" data-redirect="'+K+'" data-brightcove="'+H+'" data-title="'+E+'" data-image ="'+ab+'" data-summary = "'+z+'"><i class="fa fa-facebook"></i></a>'
}if(N!=""){s+='<a data-share="linkedin" href="javascript:void(0);" data-url="'+Y+'" data-redirect="'+K+'" data-brightcove="'+H+'" data-title="'+E+'" data-image ="'+ab+'" data-summary = "'+z+'"><i class="fa fa-linkedin"></i></a>'
}if(T!=""){s+='<a data-share="google" href="javascript:void(0);" data-url="'+Y+'" data-redirect="'+K+'" data-brightcove="'+H+'" data-title="'+E+'" data-image ="'+ab+'" data-summary = "'+z+'"><i class="fa fa-google-plus"></i></a>'
}s+="</div>";
s+="</div>";
s+="</div>";
R+=C+s;
$(o).find(".thumb-container ").remove();
$(o).find(".col-md-12").append(R)
}})
}})
}}).fail(function(j,l,k){console.log("failed");
$("body").find(".hcontentcard.parbase").each(function(){$(this).find(".thumb-container").removeAttr("style")
})
})
}else{$("body").find(".hcontentcard.parbase").each(function(){$(this).find(".thumb-container").removeAttr("style")
})
}}$("body").bind("DOMNodeInserted",function(){$(this).find(".detail-content p").addClass("clampingDetail");
$(".detail-content").each(function(){if($(this).find("h3").length<1&&$(this).parent(".col-md-10").find(".cta_module").length<1){$(this).addClass("singlePara")
}else{if($(this).find("p").length<1&&$(this).parent(".col-md-10").find(".cta_module").length<1){$(this).addClass("singleHeading")
}else{if($(this).find("h3").length<1){$(this).addClass("onlyPara")
}else{if($(this).find("p").length<1){$(this).addClass("onlyHeading")
}else{if($(this).parent(".col-md-10").find(".cta_module").length<1&&$(this).parent(".col-md-10").parent(".social-block").find(".social-block").length<1){$(this).addClass("onlyContent")
}}}}}})
});
$(window).load(function(){$(window).trigger("resize")
}); | }else{var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/dark-play-icon.png" alt="Video Play Icon" /></a>'
}}else{if(aa==="fa fa-video-camera"){var C='<div class="thumb-img alt-background" id="'+x+'" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>"; | random_line_split |
clientlib-promoengine.js | $(document).ready(function(){if($('[name="isauthor"]').val()=="true"){var b=window.location.href;
if(b.indexOf("preview=true")!=-1){if(b.indexOf("promoPath=")!=-1){var c=b.split("promoPath=")[1];
if(c.indexOf("&")!=-1&&b.indexOf("position=")!=-1){c=c.split("&")[0];
var a=b.split("position=")[1];
if($("body").find(".hcontentcard.parbase").html()!=undefined){hcontentCarddisplay(c,a,"preview")
}}}}}else{if($("body").find(".hcontentcard.parbase").html()!=undefined){hcontentCarddisplay("","","")
}}});
function | (b,e,f){function a(){var l=new Date();
var k=l.getTimezoneOffset();
k=(k/60)*-1;
var j=l.getTime();
if(k!==0){return(j+(3600000*k))
}return j
}var d=$('[name="resolvedPath"]').val();
var h=$('[name="pagePath"]').attr("content");
var g=[];
$("body").find(".hcontentcard.parbase").each(function(){var j=$(this).find("input#promotionalContent").val();
var l=$(this).find("input#promoposition").val();
var k=$(this).find("input#IsMbox").val();
if(j==="true"&&l!=""&&l!=undefined){g.push(l)
}});
var c=$('[name="localeVal"]').val();
if(c==="null"||c===""){var i=d;
if(i.indexOf("/content/vmware/vmware-preview-sites")>-1){i=i.replace("/content/vmware/vmware-preview-sites/","");
c=i.split("/")[0]
}else{if(i.indexOf("/content/vmware/vmware-published-sites")>-1){i=i.replace("/content/vmware/vmware-published-sites/","");
c=i.split("/")[0]
}else{if(h.indexOf("/content/vmware/vmware-preview-sites")>-1){i=h.replace("/content/vmware/vmware-preview-sites/","");
redirect_locale=i.split("/")[0]
}else{if(h.indexOf("/content/vmware/vmware-published-sites")>-1){i=h.replace("/content/vmware/vmware-published-sites/","");
redirect_locale=i.split("/")[0]
}}c=d.split("/")[1];
d=h.replace(redirect_locale,c)
}}}if(d.indexOf("/content/vmware/vmware-preview-sites")>-1){d=d.replace("/content/vmware/vmware-preview-sites","/content/vmware/vmware-published-sites")
}if(g.length>0){$.ajax({url:"/bin/vmware/promotionalcontent",type:"Get",async:true,data:{path:d,promopositionArray:JSON.stringify(g),currentDate:a(),promoPath:b,position:e,preview:f,locale:c}}).done(function(j){if(j.PromoJSon!=undefined){var k=j.PromoJSon;
$("body").find(".hcontentcard.parbase").each(function(){var o=$(this);
o.find(".thumb-container").removeAttr("style");
var n=$(this).find("input#promotionalContent").val();
var l=$(this).find("input#promoposition").val();
var m=$(this).find("input#templateName").val();
if(n==="true"&&promoposition!=""&&promoposition!=undefined){$.each(k,function(A,S){var q=S.isValidPromo;
var J=S.promoPosition!=undefined?S.promoPosition:"";
if(q&&J===l){var t=S.hamBurgerMenu!=undefined?S.hamBurgerMenu:"";
var ac=S.ctaPath!=undefined?S.ctaPath:"";
var D=S.ctaLabel!=undefined?S.ctaLabel:"";
var L=S.ctaLinkTitle!=undefined?S.ctaLinkTitle:"";
var O=S.date!=undefined?S.date:"";
var E=S.promoTitle!=undefined?S.promoTitle:"";
var V=S.promoContent!=undefined?S.promoContent:"";
var w=S.bcvtrue!=undefined?S.bcvtrue:"";
var aa=S.iconval!=undefined?S.iconval:"";
var u=false;
var M=S.altText!=undefined?S.altText:"";
var y=S.playicon?S.playicon:"";
var U=S.windowSelection!=undefined?S.windowSelection:"";
var I=S.zoomIcon!=undefined?S.zoomIcon:"";
var Z=S.largeImageLink!=undefined?S.largeImageLink:"";
var ab=S.imagePath!=undefined?S.imagePath:"";
var B=S.bcvid!=undefined?S.bcvid:"";
var W=S.expandImageCheckbox!=undefined?S.expandImageCheckbox:"";
var P=S.bcduration!=undefined?S.bcduration:"";
var ad=S.twitter!=undefined?S.twitter:"";
var N=S.linkedin!=undefined?S.linkedin:"";
var T=S.googleplus!=undefined?S.googleplus:"";
var Q=S.facebook!=undefined?S.facebook:"";
var G=S.props!=undefined?S.props:"";
var r=S.updatedbody!=undefined?S.updatedbody:"";
var z=S.description!=undefined?S.description:"";
var X=$(o).find(".section-custom").attr("id")!=undefined?$(o).find(".section-custom").attr("id"):"";
var x=$(o).find("#divId").val()!=undefined?$(o).find("#divId").val():"";
if(t===""){t="true"
}var R="";
if(t!=true&&ac===""&&O===""&&(E===""||V==="")){R='<div class="thumb-container withoutHamModule">'
}else{R='<div class="thumb-container">'
}if(w==="false"){if(aa==="fa fa-youtube"){u=true;
var C='<div class="thumb-img alt-background" id="'+x+'" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>";
if(!(y==="false")){var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/play_icon.png" alt="Video Play Icon" /></a>'
}else{var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/dark-play-icon.png" alt="Video Play Icon" /></a>'
}}else{if(aa==="fa fa-video-camera"){var C='<div class="thumb-img alt-background" id="'+x+'" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>";
if(!(y==="false")){var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/play_icon.png" alt="Video Play Icon" /></a>'
}else{var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/dark-play-icon.png" alt="Video Play Icon" /></a>'
}}else{var C='<div class="thumb-img alt-background" id="'+x+'" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>";
if(W=="true"){if(!(I==="false")){C+='<a onclick="return ImageLargeView(this);" largeimagename='+Z+" altText="+M+' href="javascript:void(0)"><i class="fa fa-search-plus img-largeView" aria-hidden="true"></i></a>'
}else{C+='<a onclick="return ImageLargeView(this);" largeimagename='+Z+" altText="+M+' href="javascript:void(0)"><i class="fa fa-search-plus img-largeView dark" aria-hidden="true"></i></a>'
}}}}C+="</div>"
}else{var C='<div class="thumb-img alt-background" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>";
if(B!=null&&(B!="")){if(!(y==="false")){C+=" <a asset-id="+B+' data-element-type="video" href="javascript:void(0);" onclick="return showVideo('+B+');"><img src="/content/dam/digitalmarketing/vmware/global-icons/play_icon.png" alt="Video Play Icon" /></a>'
}else{C+=" <a "+G+" asset-id="+B+' data-element-type="video" href="javascript:void(0);" onclick="return showVideo('+B+');"><img src="/content/dam/digitalmarketing/vmware/global-icons/dark-play-icon.png" alt="Video Play Icon" /></a>'
}}C+="</div>"
}var s='<div class="thumb-details">';
if(w!="false"&&B!=""&&P!=""){s+='<span class="timestamp">'+P+"</span>"
}s+='<div class="col-xs-1 col-md-1 col-sm-1">';
if(aa==="fa support_ico"){aa="fa support_ico"
}else{aa=aa
}s+='<i class="'+aa+'"></i></div>';
s+='<div class="col-xs-10 col-md-10 col-sm-10">';
if(!(t==="true")){var F="no"
}else{var F=""
}s+='<div class="detail-content '+F+'clamp">';
if(E!=""&&V!=""){s+='<h3 class="'+F+'clampingDetail"><span>'+E+"</span></h3>";
s+=r
}else{if(E!=""){s+='<h3 class="'+F+'clampingDetail"><p>'+E+"</p>"
}else{s+=r
}}s+="</div>";
if(ac!=""&&D!=""||O!=""){s+='<div class="cta_module">';
if(ac!=""&&D!=""){if(w!="false"&&B!=""){s+=' <a class="learn_more" asset-id='+B+' data-element-type="video" href="javascript:void(0);" onclick="return showVideo('+B+');">'+D+'<i class="fa fa-angle-double-right inline"></i></a>'
}else{var p=U=="true"?"_self":"_blank";
s+=' <a class="learn_more" title='+L+" target="+p+" href="+ac+">"+D+'<i class="fa fa-angle-double-right inline"></i></a>'
}}if(O!=""){s+='<span class="datestamp">'+O.split("T")[0]+"</span>"
}s+="</div>"
}s+="</div>";
var v="";
if(t==="true"){if(m!="l4enterprise"){s+='<div class="col-xs-1 col-md-1 col-sm-1 cntClk"><i class="fa fa-bars detail-toggle non-product"></i></div>'
}if(m==="l4enterprise"){s+='<div class="col-xs-1 col-md-1 col-sm-1 cntClk"><i class="fa fa-plus-square detail-toggle"></i></div>'
}}s+='<div class="social-block">';
var Y="";
var K="";
var H="";
if(ac!=""||ac.includes("/content/dam/")){Y=ac;
K="redirect"
}else{if(B!=null&&B!=""){Y=B;
H="brightcove"
}else{Y=X
}}if(ad!=""){s+='<a data-share="twitter" href="javascript:void(0);" data-url="'+Y+'" data-redirect="'+K+'" data-brightcove="'+H+'" data-title="'+E+'" data-via="VMware" data-summary = "'+z+'" data-cta-link = "'+z+'"><i class="fa fa-twitter"></i></a>'
}if(Q!=""){s+='<a data-share="facebook" href="javascript:void(0);" data-url="'+Y+'" data-redirect="'+K+'" data-brightcove="'+H+'" data-title="'+E+'" data-image ="'+ab+'" data-summary = "'+z+'"><i class="fa fa-facebook"></i></a>'
}if(N!=""){s+='<a data-share="linkedin" href="javascript:void(0);" data-url="'+Y+'" data-redirect="'+K+'" data-brightcove="'+H+'" data-title="'+E+'" data-image ="'+ab+'" data-summary = "'+z+'"><i class="fa fa-linkedin"></i></a>'
}if(T!=""){s+='<a data-share="google" href="javascript:void(0);" data-url="'+Y+'" data-redirect="'+K+'" data-brightcove="'+H+'" data-title="'+E+'" data-image ="'+ab+'" data-summary = "'+z+'"><i class="fa fa-google-plus"></i></a>'
}s+="</div>";
s+="</div>";
s+="</div>";
R+=C+s;
$(o).find(".thumb-container ").remove();
$(o).find(".col-md-12").append(R)
}})
}})
}}).fail(function(j,l,k){console.log("failed");
$("body").find(".hcontentcard.parbase").each(function(){$(this).find(".thumb-container").removeAttr("style")
})
})
}else{$("body").find(".hcontentcard.parbase").each(function(){$(this).find(".thumb-container").removeAttr("style")
})
}}$("body").bind("DOMNodeInserted",function(){$(this).find(".detail-content p").addClass("clampingDetail");
$(".detail-content").each(function(){if($(this).find("h3").length<1&&$(this).parent(".col-md-10").find(".cta_module").length<1){$(this).addClass("singlePara")
}else{if($(this).find("p").length<1&&$(this).parent(".col-md-10").find(".cta_module").length<1){$(this).addClass("singleHeading")
}else{if($(this).find("h3").length<1){$(this).addClass("onlyPara")
}else{if($(this).find("p").length<1){$(this).addClass("onlyHeading")
}else{if($(this).parent(".col-md-10").find(".cta_module").length<1&&$(this).parent(".col-md-10").parent(".social-block").find(".social-block").length<1){$(this).addClass("onlyContent")
}}}}}})
});
$(window).load(function(){$(window).trigger("resize")
}); | hcontentCarddisplay | identifier_name |
clientlib-promoengine.js | $(document).ready(function(){if($('[name="isauthor"]').val()=="true"){var b=window.location.href;
if(b.indexOf("preview=true")!=-1){if(b.indexOf("promoPath=")!=-1){var c=b.split("promoPath=")[1];
if(c.indexOf("&")!=-1&&b.indexOf("position=")!=-1){c=c.split("&")[0];
var a=b.split("position=")[1];
if($("body").find(".hcontentcard.parbase").html()!=undefined){hcontentCarddisplay(c,a,"preview")
}}}}}else{if($("body").find(".hcontentcard.parbase").html()!=undefined){hcontentCarddisplay("","","")
}}});
function hcontentCarddisplay(b,e,f){function a() | var d=$('[name="resolvedPath"]').val();
var h=$('[name="pagePath"]').attr("content");
var g=[];
$("body").find(".hcontentcard.parbase").each(function(){var j=$(this).find("input#promotionalContent").val();
var l=$(this).find("input#promoposition").val();
var k=$(this).find("input#IsMbox").val();
if(j==="true"&&l!=""&&l!=undefined){g.push(l)
}});
var c=$('[name="localeVal"]').val();
if(c==="null"||c===""){var i=d;
if(i.indexOf("/content/vmware/vmware-preview-sites")>-1){i=i.replace("/content/vmware/vmware-preview-sites/","");
c=i.split("/")[0]
}else{if(i.indexOf("/content/vmware/vmware-published-sites")>-1){i=i.replace("/content/vmware/vmware-published-sites/","");
c=i.split("/")[0]
}else{if(h.indexOf("/content/vmware/vmware-preview-sites")>-1){i=h.replace("/content/vmware/vmware-preview-sites/","");
redirect_locale=i.split("/")[0]
}else{if(h.indexOf("/content/vmware/vmware-published-sites")>-1){i=h.replace("/content/vmware/vmware-published-sites/","");
redirect_locale=i.split("/")[0]
}}c=d.split("/")[1];
d=h.replace(redirect_locale,c)
}}}if(d.indexOf("/content/vmware/vmware-preview-sites")>-1){d=d.replace("/content/vmware/vmware-preview-sites","/content/vmware/vmware-published-sites")
}if(g.length>0){$.ajax({url:"/bin/vmware/promotionalcontent",type:"Get",async:true,data:{path:d,promopositionArray:JSON.stringify(g),currentDate:a(),promoPath:b,position:e,preview:f,locale:c}}).done(function(j){if(j.PromoJSon!=undefined){var k=j.PromoJSon;
$("body").find(".hcontentcard.parbase").each(function(){var o=$(this);
o.find(".thumb-container").removeAttr("style");
var n=$(this).find("input#promotionalContent").val();
var l=$(this).find("input#promoposition").val();
var m=$(this).find("input#templateName").val();
if(n==="true"&&promoposition!=""&&promoposition!=undefined){$.each(k,function(A,S){var q=S.isValidPromo;
var J=S.promoPosition!=undefined?S.promoPosition:"";
if(q&&J===l){var t=S.hamBurgerMenu!=undefined?S.hamBurgerMenu:"";
var ac=S.ctaPath!=undefined?S.ctaPath:"";
var D=S.ctaLabel!=undefined?S.ctaLabel:"";
var L=S.ctaLinkTitle!=undefined?S.ctaLinkTitle:"";
var O=S.date!=undefined?S.date:"";
var E=S.promoTitle!=undefined?S.promoTitle:"";
var V=S.promoContent!=undefined?S.promoContent:"";
var w=S.bcvtrue!=undefined?S.bcvtrue:"";
var aa=S.iconval!=undefined?S.iconval:"";
var u=false;
var M=S.altText!=undefined?S.altText:"";
var y=S.playicon?S.playicon:"";
var U=S.windowSelection!=undefined?S.windowSelection:"";
var I=S.zoomIcon!=undefined?S.zoomIcon:"";
var Z=S.largeImageLink!=undefined?S.largeImageLink:"";
var ab=S.imagePath!=undefined?S.imagePath:"";
var B=S.bcvid!=undefined?S.bcvid:"";
var W=S.expandImageCheckbox!=undefined?S.expandImageCheckbox:"";
var P=S.bcduration!=undefined?S.bcduration:"";
var ad=S.twitter!=undefined?S.twitter:"";
var N=S.linkedin!=undefined?S.linkedin:"";
var T=S.googleplus!=undefined?S.googleplus:"";
var Q=S.facebook!=undefined?S.facebook:"";
var G=S.props!=undefined?S.props:"";
var r=S.updatedbody!=undefined?S.updatedbody:"";
var z=S.description!=undefined?S.description:"";
var X=$(o).find(".section-custom").attr("id")!=undefined?$(o).find(".section-custom").attr("id"):"";
var x=$(o).find("#divId").val()!=undefined?$(o).find("#divId").val():"";
if(t===""){t="true"
}var R="";
if(t!=true&&ac===""&&O===""&&(E===""||V==="")){R='<div class="thumb-container withoutHamModule">'
}else{R='<div class="thumb-container">'
}if(w==="false"){if(aa==="fa fa-youtube"){u=true;
var C='<div class="thumb-img alt-background" id="'+x+'" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>";
if(!(y==="false")){var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/play_icon.png" alt="Video Play Icon" /></a>'
}else{var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/dark-play-icon.png" alt="Video Play Icon" /></a>'
}}else{if(aa==="fa fa-video-camera"){var C='<div class="thumb-img alt-background" id="'+x+'" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>";
if(!(y==="false")){var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/play_icon.png" alt="Video Play Icon" /></a>'
}else{var p=U=="true"?"_self":"_blank";
C+="<a "+G+' class="learn_more" href='+ac+" target="+p+'><img src="/content/dam/digitalmarketing/vmware/global-icons/dark-play-icon.png" alt="Video Play Icon" /></a>'
}}else{var C='<div class="thumb-img alt-background" id="'+x+'" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>";
if(W=="true"){if(!(I==="false")){C+='<a onclick="return ImageLargeView(this);" largeimagename='+Z+" altText="+M+' href="javascript:void(0)"><i class="fa fa-search-plus img-largeView" aria-hidden="true"></i></a>'
}else{C+='<a onclick="return ImageLargeView(this);" largeimagename='+Z+" altText="+M+' href="javascript:void(0)"><i class="fa fa-search-plus img-largeView dark" aria-hidden="true"></i></a>'
}}}}C+="</div>"
}else{var C='<div class="thumb-img alt-background" style="background-image:url('+ab+')">';
C+='<p class="alt-text">'+M+"</p>";
if(B!=null&&(B!="")){if(!(y==="false")){C+=" <a asset-id="+B+' data-element-type="video" href="javascript:void(0);" onclick="return showVideo('+B+');"><img src="/content/dam/digitalmarketing/vmware/global-icons/play_icon.png" alt="Video Play Icon" /></a>'
}else{C+=" <a "+G+" asset-id="+B+' data-element-type="video" href="javascript:void(0);" onclick="return showVideo('+B+');"><img src="/content/dam/digitalmarketing/vmware/global-icons/dark-play-icon.png" alt="Video Play Icon" /></a>'
}}C+="</div>"
}var s='<div class="thumb-details">';
if(w!="false"&&B!=""&&P!=""){s+='<span class="timestamp">'+P+"</span>"
}s+='<div class="col-xs-1 col-md-1 col-sm-1">';
if(aa==="fa support_ico"){aa="fa support_ico"
}else{aa=aa
}s+='<i class="'+aa+'"></i></div>';
s+='<div class="col-xs-10 col-md-10 col-sm-10">';
if(!(t==="true")){var F="no"
}else{var F=""
}s+='<div class="detail-content '+F+'clamp">';
if(E!=""&&V!=""){s+='<h3 class="'+F+'clampingDetail"><span>'+E+"</span></h3>";
s+=r
}else{if(E!=""){s+='<h3 class="'+F+'clampingDetail"><p>'+E+"</p>"
}else{s+=r
}}s+="</div>";
if(ac!=""&&D!=""||O!=""){s+='<div class="cta_module">';
if(ac!=""&&D!=""){if(w!="false"&&B!=""){s+=' <a class="learn_more" asset-id='+B+' data-element-type="video" href="javascript:void(0);" onclick="return showVideo('+B+');">'+D+'<i class="fa fa-angle-double-right inline"></i></a>'
}else{var p=U=="true"?"_self":"_blank";
s+=' <a class="learn_more" title='+L+" target="+p+" href="+ac+">"+D+'<i class="fa fa-angle-double-right inline"></i></a>'
}}if(O!=""){s+='<span class="datestamp">'+O.split("T")[0]+"</span>"
}s+="</div>"
}s+="</div>";
var v="";
if(t==="true"){if(m!="l4enterprise"){s+='<div class="col-xs-1 col-md-1 col-sm-1 cntClk"><i class="fa fa-bars detail-toggle non-product"></i></div>'
}if(m==="l4enterprise"){s+='<div class="col-xs-1 col-md-1 col-sm-1 cntClk"><i class="fa fa-plus-square detail-toggle"></i></div>'
}}s+='<div class="social-block">';
var Y="";
var K="";
var H="";
if(ac!=""||ac.includes("/content/dam/")){Y=ac;
K="redirect"
}else{if(B!=null&&B!=""){Y=B;
H="brightcove"
}else{Y=X
}}if(ad!=""){s+='<a data-share="twitter" href="javascript:void(0);" data-url="'+Y+'" data-redirect="'+K+'" data-brightcove="'+H+'" data-title="'+E+'" data-via="VMware" data-summary = "'+z+'" data-cta-link = "'+z+'"><i class="fa fa-twitter"></i></a>'
}if(Q!=""){s+='<a data-share="facebook" href="javascript:void(0);" data-url="'+Y+'" data-redirect="'+K+'" data-brightcove="'+H+'" data-title="'+E+'" data-image ="'+ab+'" data-summary = "'+z+'"><i class="fa fa-facebook"></i></a>'
}if(N!=""){s+='<a data-share="linkedin" href="javascript:void(0);" data-url="'+Y+'" data-redirect="'+K+'" data-brightcove="'+H+'" data-title="'+E+'" data-image ="'+ab+'" data-summary = "'+z+'"><i class="fa fa-linkedin"></i></a>'
}if(T!=""){s+='<a data-share="google" href="javascript:void(0);" data-url="'+Y+'" data-redirect="'+K+'" data-brightcove="'+H+'" data-title="'+E+'" data-image ="'+ab+'" data-summary = "'+z+'"><i class="fa fa-google-plus"></i></a>'
}s+="</div>";
s+="</div>";
s+="</div>";
R+=C+s;
$(o).find(".thumb-container ").remove();
$(o).find(".col-md-12").append(R)
}})
}})
}}).fail(function(j,l,k){console.log("failed");
$("body").find(".hcontentcard.parbase").each(function(){$(this).find(".thumb-container").removeAttr("style")
})
})
}else{$("body").find(".hcontentcard.parbase").each(function(){$(this).find(".thumb-container").removeAttr("style")
})
}}$("body").bind("DOMNodeInserted",function(){$(this).find(".detail-content p").addClass("clampingDetail");
$(".detail-content").each(function(){if($(this).find("h3").length<1&&$(this).parent(".col-md-10").find(".cta_module").length<1){$(this).addClass("singlePara")
}else{if($(this).find("p").length<1&&$(this).parent(".col-md-10").find(".cta_module").length<1){$(this).addClass("singleHeading")
}else{if($(this).find("h3").length<1){$(this).addClass("onlyPara")
}else{if($(this).find("p").length<1){$(this).addClass("onlyHeading")
}else{if($(this).parent(".col-md-10").find(".cta_module").length<1&&$(this).parent(".col-md-10").parent(".social-block").find(".social-block").length<1){$(this).addClass("onlyContent")
}}}}}})
});
$(window).load(function(){$(window).trigger("resize")
}); | {var l=new Date();
var k=l.getTimezoneOffset();
k=(k/60)*-1;
var j=l.getTime();
if(k!==0){return(j+(3600000*k))
}return j
} | identifier_body |
image_pyramid.py |
import numpy as np
from matplotlib import pyplot as plt
import math
import cv2
import os
import shutil
import sys
from skimage.measure import compare_ssim
# from scipy.misc import imfilter
from skimage import color, data, restoration
from scipy.signal import convolve2d
from skimage.exposure import rescale_intensity
import numpy as np
import argparse
import cv2
# from PIL import Image
# from resizeimage import resizeimage
class ImageEnhancement:
def gaussian_blurring(self,input_image,kernel_size,sigma):
"""Get Guassian Blurred image.
@param input_image: The source image.
@param kernel_size: Size of the filter
@param sigma:Control variation around its mean value
@return The gaussian blurred image
"""
#Applying Gaussian Blur filter
output_image=cv2.GaussianBlur(input_image,kernel_size,sigma)
return output_image
def sampling(self,input_image,width,height):
"""Resizing the image
@param input_image: The source image.
@param width:Width of new image
@param height:Height of new image
@return The resized image
"""
#Resizing the image
output_image=cv2.resize(input_image,None,fx=width,fy=height)
return output_image
def get_dark_channel(self,img, *, size):
"""Get dark channel for an image.
@param img: The source image.
@param size: Patch size.
@return The dark channel of the image.
"""
#Extract the dark/hazy part from the image
minch = np.amin(img, axis=2)
box = cv2.getStructuringElement(cv2.MORPH_RECT, (size // 2, size // 2))
return cv2.erode(minch, box)
def get_atmospheric_light(self,img, *, size, percent):
|
def get_transmission(self,img, atmosphere, *, size, omega, radius, epsilon):
"""Estimate transmission map of an image.
@param img: The source image.
@param atmosphere: The atmospheric light for the image.
@param omega: Factor to preserve minor amounts of haze [1].
@param radius: (default: 40) Radius for the guided filter [2].
@param epsilon: (default: 0.001) Epsilon for the guided filter [2].
@return The transmission map for the source image.
"""
#Get transmission map from the image
division = np.float64(img) / np.float64(atmosphere)
raw = (1 - omega * self.get_dark_channel(division, size=size)).astype(np.float32)
return cv2.ximgproc.guidedFilter(img, raw, radius, epsilon)
def get_scene_radiance(self, img,*,size=15,omega=0.95,trans_lb=0.1,percent=0.1,radius=40,epsilon=0.001):
"""Get recovered scene radiance for a hazy image.
@param img: The source image to be dehazed.
@param omega: (default: 0.95) Factor to preserve minor amounts of haze [1].
@param trans_lb: (default: 0.1) Lower bound for transmission [1].
@param size: (default: 15) Patch size for filtering etc [1].
@param percent: (default: 0.1) Percentage of pixels chosen to compute atmospheric light [1].
@param radius: (default: 40) Radius for the guided filter [2].
@param epsilon: (default: 0.001) Epsilon for the guided filter [2].
@return The final dehazed image.
"""
L=356
#Applying atmosheric scattering model on the image
atmosphere = self.get_atmospheric_light(img, size=size, percent=percent)
trans = self.get_transmission(img, atmosphere, size=size, omega=omega, radius=radius, epsilon=epsilon)
clamped = np.clip(trans, trans_lb, omega)[:, :, None]
img = np.float64(img)
return np.uint8(((img - atmosphere) / clamped + atmosphere).clip(0, L - 1))
def process_imgdir(self,imgdir):
"""Get haze free images in the directory
@param imgdir: The source image directory.
@return All the haze free images.
"""
#Write images into resultdir
resultdir = os.path.join(imgdir, 'results')
#Read images from input dir
inputdir = os.path.join(imgdir, 'inputs')
shutil.rmtree(resultdir)
os.mkdir(resultdir)
#Read files from input images
for fullname in os.listdir(inputdir):
filepath = os.path.join(inputdir, fullname)
if os.path.isfile(filepath):
basename = os.path.basename(filepath)
image = cv2.imread(filepath, cv2.IMREAD_COLOR)
if len(image.shape) == 3 and image.shape[2] == 3:
print('Processing %s ...' % basename)
else:
sys.stderr.write('Skipping %s, not RGB' % basename)
continue
#Extract haze from the scene and then save the image
dehazed = self.get_scene_radiance(image)
cv2.imwrite(os.path.join(resultdir, basename), dehazed)
return os.path.join(resultdir, basename)
def image_enhancement(self,img,file_name):
"""Main function to call all the functions
@param img: Input image
@param file_name: The output file name
@return All the haze free images.
"""
#Creating output directory if it doesnt exist
dirname = 'output'
dir_path = os.path.dirname(os.path.realpath(__file__))
if(os.path.isdir(os.path.join(dir_path, dirname))):
if(os.path.exists(os.path.join(dir_path, dirname))):
pass
else:
os.mkdir(os.path.join(dir_path, dirname))
os.mkdir(os.path.join(dir_path, dirname,"results"))
os.mkdir(os.path.join(dir_path, dirname,"inputs"))
#Extracting edges using Canny's Edge Detection
edges = cv2.Canny(img,80,255)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','edges.png'),edges)
kernel = (3,3)
#Applying image pyramid technique
#Applying Gaussian blur filter over the image
gaussian_blurred_image =self.gaussian_blurring(img,kernel,0)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','gaussian_blurred_image.png'),gaussian_blurred_image)
plt.subplot(121),
plt.xticks([]), plt.yticks([])
plt.subplot(122),
plt.xticks([]), plt.yticks([])
#Downsizing the image to 1/4th of its original size
coarse_image =self.sampling(gaussian_blurred_image,0.25,0.25)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','coarse_image.png'),coarse_image)
#Upsampling the image to its original size
up_sampling=self.sampling(coarse_image,4,4)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','up_sampling.png'),up_sampling)
#Applying Gaussian Blur filtering
gaus=self.gaussian_blurring(up_sampling,kernel,0)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','gaus2.png'),gaus)
#Resizing the image for image subtraction
gaussian_blurred_image=cv2.resize(img,(gaus.shape[1],gaus.shape[0]))
#Convert into grayscale
gaus_gray=cv2.cvtColor(gaus,cv2.COLOR_BGR2GRAY)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','gausgray.png'),gaus_gray)
#Converting to grayscale
dst_gray=cv2.cvtColor(gaussian_blurred_image,cv2.COLOR_BGR2GRAY)
(score, diff) = compare_ssim(gaus_gray, dst_gray, full=True)
diff = (diff * 255).astype("uint8")
#Image Subtraction
detail_image = cv2.subtract(gaus,gaussian_blurred_image)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','detailed.png'),detail_image)
print(detail_image.shape)
output_path=self.process_imgdir(os.path.join(dir_path, dirname))
dehazed_image=cv2.imread(output_path)
# dehazed_image =self.sampling(dehazed_image,4,4)
output_path="\\".join(output_path.split("\\")[:-1])
print(dehazed_image.shape)
cv2.imwrite(os.path.join(output_path,'dehazed_image.png'),dehazed_image)
#Adding two images
dst = cv2.addWeighted(detail_image,1,dehazed_image,1,0)
kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
dst = cv2.filter2D(dst, -1, kernel)
#Converting images to lightness,chroma ,hue for increasing the brightness
lab= cv2.cvtColor(dst, cv2.COLOR_BGR2LAB)
l, a, b = cv2.split(lab)
#Applying CLAHE Algorithm for contrast amplification which is limited and to reduce the problem of noise amplification
clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
cl = clahe.apply(l)
limg = cv2.merge((cl,a,b))
#Convert back to rgb
final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
psf = np.ones((5, 5)) / 25
#Applying mean denoising filtering
dst=cv2.fastNlMeansDenoisingColored(final,None,10,10,7,21)
edges=cv2.cvtColor(edges,cv2.COLOR_GRAY2BGR)
print(edges.shape)
edges=cv2.resize(edges,(dst.shape[1],dst.shape[0]))
#Increasing the brightness of the image
hsv = cv2.cvtColor(dst, cv2.COLOR_BGR2HSV)
h,s,v=cv2.split(hsv)
value = 30 #whatever value you want to add
lim=255-value
s[s>lim]=255
s[s<lim]+=value
value1=30
lim1=255-value1
v[v>lim1]=255
v[v<lim1]+=value1
hsv = cv2.merge((h, s, v))
dst = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
#Writing the output file
dst = cv2.addWeighted(dst,1,edges,1,0)
cv2.imwrite(os.path.join(output_path,file_name+'.png'),dst)
#Resizing the file to compare it with other methods
resized = cv2.resize(dst, (256,256), interpolation = cv2.INTER_AREA)
cv2.imwrite(os.path.join(output_path,'result_resized.png'),resized)
if __name__=="__main__":
#Input file path
path="dataset\\1_1_0.90179.png"
#loading the file
img = cv2.imread(path)
print(img)
#Extracting file name
file_name=".".join(path.split("\\")[-1].split(".")[:-1])
#calling the function
ImageEnhancement().image_enhancement(img,file_name) | """Estimate atmospheric light for an image.
@param img: the source image.
@param size: Patch size for calculating the dark channel.
@param percent: Percentage of brightest pixels in the dark channel
considered for the estimation.
@return The estimated atmospheric light.
"""
#Get the atmospheric light factor from the image
m, n, _ = img.shape
flat_img = img.reshape(m * n, 3)
flat_dark = self.get_dark_channel(img, size=size).ravel()
count = math.ceil(m * n * percent / 100)
indices = np.argpartition(flat_dark, -count)[:-count]
return np.amax(np.take(flat_img, indices, axis=0), axis=0) | identifier_body |
image_pyramid.py | import numpy as np
from matplotlib import pyplot as plt
import math
import cv2
import os
import shutil
import sys
from skimage.measure import compare_ssim
# from scipy.misc import imfilter
from skimage import color, data, restoration
from scipy.signal import convolve2d
from skimage.exposure import rescale_intensity
import numpy as np
import argparse
import cv2
# from PIL import Image
# from resizeimage import resizeimage
class ImageEnhancement:
def gaussian_blurring(self,input_image,kernel_size,sigma):
"""Get Guassian Blurred image.
@param input_image: The source image.
@param kernel_size: Size of the filter
@param sigma:Control variation around its mean value
@return The gaussian blurred image
"""
#Applying Gaussian Blur filter
output_image=cv2.GaussianBlur(input_image,kernel_size,sigma)
return output_image
def sampling(self,input_image,width,height):
"""Resizing the image
@param input_image: The source image.
@param width:Width of new image
@param height:Height of new image
@return The resized image
"""
#Resizing the image
output_image=cv2.resize(input_image,None,fx=width,fy=height)
return output_image
def get_dark_channel(self,img, *, size):
"""Get dark channel for an image.
@param img: The source image.
@param size: Patch size.
@return The dark channel of the image.
"""
#Extract the dark/hazy part from the image
minch = np.amin(img, axis=2)
box = cv2.getStructuringElement(cv2.MORPH_RECT, (size // 2, size // 2))
return cv2.erode(minch, box)
def get_atmospheric_light(self,img, *, size, percent):
"""Estimate atmospheric light for an image.
@param img: the source image.
@param size: Patch size for calculating the dark channel.
@param percent: Percentage of brightest pixels in the dark channel
considered for the estimation.
@return The estimated atmospheric light.
"""
#Get the atmospheric light factor from the image
m, n, _ = img.shape |
flat_img = img.reshape(m * n, 3)
flat_dark = self.get_dark_channel(img, size=size).ravel()
count = math.ceil(m * n * percent / 100)
indices = np.argpartition(flat_dark, -count)[:-count]
return np.amax(np.take(flat_img, indices, axis=0), axis=0)
def get_transmission(self,img, atmosphere, *, size, omega, radius, epsilon):
"""Estimate transmission map of an image.
@param img: The source image.
@param atmosphere: The atmospheric light for the image.
@param omega: Factor to preserve minor amounts of haze [1].
@param radius: (default: 40) Radius for the guided filter [2].
@param epsilon: (default: 0.001) Epsilon for the guided filter [2].
@return The transmission map for the source image.
"""
#Get transmission map from the image
division = np.float64(img) / np.float64(atmosphere)
raw = (1 - omega * self.get_dark_channel(division, size=size)).astype(np.float32)
return cv2.ximgproc.guidedFilter(img, raw, radius, epsilon)
def get_scene_radiance(self, img,*,size=15,omega=0.95,trans_lb=0.1,percent=0.1,radius=40,epsilon=0.001):
"""Get recovered scene radiance for a hazy image.
@param img: The source image to be dehazed.
@param omega: (default: 0.95) Factor to preserve minor amounts of haze [1].
@param trans_lb: (default: 0.1) Lower bound for transmission [1].
@param size: (default: 15) Patch size for filtering etc [1].
@param percent: (default: 0.1) Percentage of pixels chosen to compute atmospheric light [1].
@param radius: (default: 40) Radius for the guided filter [2].
@param epsilon: (default: 0.001) Epsilon for the guided filter [2].
@return The final dehazed image.
"""
L=356
#Applying atmosheric scattering model on the image
atmosphere = self.get_atmospheric_light(img, size=size, percent=percent)
trans = self.get_transmission(img, atmosphere, size=size, omega=omega, radius=radius, epsilon=epsilon)
clamped = np.clip(trans, trans_lb, omega)[:, :, None]
img = np.float64(img)
return np.uint8(((img - atmosphere) / clamped + atmosphere).clip(0, L - 1))
def process_imgdir(self,imgdir):
"""Get haze free images in the directory
@param imgdir: The source image directory.
@return All the haze free images.
"""
#Write images into resultdir
resultdir = os.path.join(imgdir, 'results')
#Read images from input dir
inputdir = os.path.join(imgdir, 'inputs')
shutil.rmtree(resultdir)
os.mkdir(resultdir)
#Read files from input images
for fullname in os.listdir(inputdir):
filepath = os.path.join(inputdir, fullname)
if os.path.isfile(filepath):
basename = os.path.basename(filepath)
image = cv2.imread(filepath, cv2.IMREAD_COLOR)
if len(image.shape) == 3 and image.shape[2] == 3:
print('Processing %s ...' % basename)
else:
sys.stderr.write('Skipping %s, not RGB' % basename)
continue
#Extract haze from the scene and then save the image
dehazed = self.get_scene_radiance(image)
cv2.imwrite(os.path.join(resultdir, basename), dehazed)
return os.path.join(resultdir, basename)
def image_enhancement(self,img,file_name):
"""Main function to call all the functions
@param img: Input image
@param file_name: The output file name
@return All the haze free images.
"""
#Creating output directory if it doesnt exist
dirname = 'output'
dir_path = os.path.dirname(os.path.realpath(__file__))
if(os.path.isdir(os.path.join(dir_path, dirname))):
if(os.path.exists(os.path.join(dir_path, dirname))):
pass
else:
os.mkdir(os.path.join(dir_path, dirname))
os.mkdir(os.path.join(dir_path, dirname,"results"))
os.mkdir(os.path.join(dir_path, dirname,"inputs"))
#Extracting edges using Canny's Edge Detection
edges = cv2.Canny(img,80,255)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','edges.png'),edges)
kernel = (3,3)
#Applying image pyramid technique
#Applying Gaussian blur filter over the image
gaussian_blurred_image =self.gaussian_blurring(img,kernel,0)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','gaussian_blurred_image.png'),gaussian_blurred_image)
plt.subplot(121),
plt.xticks([]), plt.yticks([])
plt.subplot(122),
plt.xticks([]), plt.yticks([])
#Downsizing the image to 1/4th of its original size
coarse_image =self.sampling(gaussian_blurred_image,0.25,0.25)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','coarse_image.png'),coarse_image)
#Upsampling the image to its original size
up_sampling=self.sampling(coarse_image,4,4)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','up_sampling.png'),up_sampling)
#Applying Gaussian Blur filtering
gaus=self.gaussian_blurring(up_sampling,kernel,0)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','gaus2.png'),gaus)
#Resizing the image for image subtraction
gaussian_blurred_image=cv2.resize(img,(gaus.shape[1],gaus.shape[0]))
#Convert into grayscale
gaus_gray=cv2.cvtColor(gaus,cv2.COLOR_BGR2GRAY)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','gausgray.png'),gaus_gray)
#Converting to grayscale
dst_gray=cv2.cvtColor(gaussian_blurred_image,cv2.COLOR_BGR2GRAY)
(score, diff) = compare_ssim(gaus_gray, dst_gray, full=True)
diff = (diff * 255).astype("uint8")
#Image Subtraction
detail_image = cv2.subtract(gaus,gaussian_blurred_image)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','detailed.png'),detail_image)
print(detail_image.shape)
output_path=self.process_imgdir(os.path.join(dir_path, dirname))
dehazed_image=cv2.imread(output_path)
# dehazed_image =self.sampling(dehazed_image,4,4)
output_path="\\".join(output_path.split("\\")[:-1])
print(dehazed_image.shape)
cv2.imwrite(os.path.join(output_path,'dehazed_image.png'),dehazed_image)
#Adding two images
dst = cv2.addWeighted(detail_image,1,dehazed_image,1,0)
kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
dst = cv2.filter2D(dst, -1, kernel)
#Converting images to lightness,chroma ,hue for increasing the brightness
lab= cv2.cvtColor(dst, cv2.COLOR_BGR2LAB)
l, a, b = cv2.split(lab)
#Applying CLAHE Algorithm for contrast amplification which is limited and to reduce the problem of noise amplification
clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
cl = clahe.apply(l)
limg = cv2.merge((cl,a,b))
#Convert back to rgb
final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
psf = np.ones((5, 5)) / 25
#Applying mean denoising filtering
dst=cv2.fastNlMeansDenoisingColored(final,None,10,10,7,21)
edges=cv2.cvtColor(edges,cv2.COLOR_GRAY2BGR)
print(edges.shape)
edges=cv2.resize(edges,(dst.shape[1],dst.shape[0]))
#Increasing the brightness of the image
hsv = cv2.cvtColor(dst, cv2.COLOR_BGR2HSV)
h,s,v=cv2.split(hsv)
value = 30 #whatever value you want to add
lim=255-value
s[s>lim]=255
s[s<lim]+=value
value1=30
lim1=255-value1
v[v>lim1]=255
v[v<lim1]+=value1
hsv = cv2.merge((h, s, v))
dst = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
#Writing the output file
dst = cv2.addWeighted(dst,1,edges,1,0)
cv2.imwrite(os.path.join(output_path,file_name+'.png'),dst)
#Resizing the file to compare it with other methods
resized = cv2.resize(dst, (256,256), interpolation = cv2.INTER_AREA)
cv2.imwrite(os.path.join(output_path,'result_resized.png'),resized)
if __name__=="__main__":
#Input file path
path="dataset\\1_1_0.90179.png"
#loading the file
img = cv2.imread(path)
print(img)
#Extracting file name
file_name=".".join(path.split("\\")[-1].split(".")[:-1])
#calling the function
ImageEnhancement().image_enhancement(img,file_name) | random_line_split | |
image_pyramid.py |
import numpy as np
from matplotlib import pyplot as plt
import math
import cv2
import os
import shutil
import sys
from skimage.measure import compare_ssim
# from scipy.misc import imfilter
from skimage import color, data, restoration
from scipy.signal import convolve2d
from skimage.exposure import rescale_intensity
import numpy as np
import argparse
import cv2
# from PIL import Image
# from resizeimage import resizeimage
class ImageEnhancement:
def gaussian_blurring(self,input_image,kernel_size,sigma):
"""Get Guassian Blurred image.
@param input_image: The source image.
@param kernel_size: Size of the filter
@param sigma:Control variation around its mean value
@return The gaussian blurred image
"""
#Applying Gaussian Blur filter
output_image=cv2.GaussianBlur(input_image,kernel_size,sigma)
return output_image
def sampling(self,input_image,width,height):
"""Resizing the image
@param input_image: The source image.
@param width:Width of new image
@param height:Height of new image
@return The resized image
"""
#Resizing the image
output_image=cv2.resize(input_image,None,fx=width,fy=height)
return output_image
def get_dark_channel(self,img, *, size):
"""Get dark channel for an image.
@param img: The source image.
@param size: Patch size.
@return The dark channel of the image.
"""
#Extract the dark/hazy part from the image
minch = np.amin(img, axis=2)
box = cv2.getStructuringElement(cv2.MORPH_RECT, (size // 2, size // 2))
return cv2.erode(minch, box)
def get_atmospheric_light(self,img, *, size, percent):
"""Estimate atmospheric light for an image.
@param img: the source image.
@param size: Patch size for calculating the dark channel.
@param percent: Percentage of brightest pixels in the dark channel
considered for the estimation.
@return The estimated atmospheric light.
"""
#Get the atmospheric light factor from the image
m, n, _ = img.shape
flat_img = img.reshape(m * n, 3)
flat_dark = self.get_dark_channel(img, size=size).ravel()
count = math.ceil(m * n * percent / 100)
indices = np.argpartition(flat_dark, -count)[:-count]
return np.amax(np.take(flat_img, indices, axis=0), axis=0)
def | (self,img, atmosphere, *, size, omega, radius, epsilon):
"""Estimate transmission map of an image.
@param img: The source image.
@param atmosphere: The atmospheric light for the image.
@param omega: Factor to preserve minor amounts of haze [1].
@param radius: (default: 40) Radius for the guided filter [2].
@param epsilon: (default: 0.001) Epsilon for the guided filter [2].
@return The transmission map for the source image.
"""
#Get transmission map from the image
division = np.float64(img) / np.float64(atmosphere)
raw = (1 - omega * self.get_dark_channel(division, size=size)).astype(np.float32)
return cv2.ximgproc.guidedFilter(img, raw, radius, epsilon)
def get_scene_radiance(self, img,*,size=15,omega=0.95,trans_lb=0.1,percent=0.1,radius=40,epsilon=0.001):
"""Get recovered scene radiance for a hazy image.
@param img: The source image to be dehazed.
@param omega: (default: 0.95) Factor to preserve minor amounts of haze [1].
@param trans_lb: (default: 0.1) Lower bound for transmission [1].
@param size: (default: 15) Patch size for filtering etc [1].
@param percent: (default: 0.1) Percentage of pixels chosen to compute atmospheric light [1].
@param radius: (default: 40) Radius for the guided filter [2].
@param epsilon: (default: 0.001) Epsilon for the guided filter [2].
@return The final dehazed image.
"""
L=356
#Applying atmosheric scattering model on the image
atmosphere = self.get_atmospheric_light(img, size=size, percent=percent)
trans = self.get_transmission(img, atmosphere, size=size, omega=omega, radius=radius, epsilon=epsilon)
clamped = np.clip(trans, trans_lb, omega)[:, :, None]
img = np.float64(img)
return np.uint8(((img - atmosphere) / clamped + atmosphere).clip(0, L - 1))
def process_imgdir(self,imgdir):
"""Get haze free images in the directory
@param imgdir: The source image directory.
@return All the haze free images.
"""
#Write images into resultdir
resultdir = os.path.join(imgdir, 'results')
#Read images from input dir
inputdir = os.path.join(imgdir, 'inputs')
shutil.rmtree(resultdir)
os.mkdir(resultdir)
#Read files from input images
for fullname in os.listdir(inputdir):
filepath = os.path.join(inputdir, fullname)
if os.path.isfile(filepath):
basename = os.path.basename(filepath)
image = cv2.imread(filepath, cv2.IMREAD_COLOR)
if len(image.shape) == 3 and image.shape[2] == 3:
print('Processing %s ...' % basename)
else:
sys.stderr.write('Skipping %s, not RGB' % basename)
continue
#Extract haze from the scene and then save the image
dehazed = self.get_scene_radiance(image)
cv2.imwrite(os.path.join(resultdir, basename), dehazed)
return os.path.join(resultdir, basename)
def image_enhancement(self,img,file_name):
"""Main function to call all the functions
@param img: Input image
@param file_name: The output file name
@return All the haze free images.
"""
#Creating output directory if it doesnt exist
dirname = 'output'
dir_path = os.path.dirname(os.path.realpath(__file__))
if(os.path.isdir(os.path.join(dir_path, dirname))):
if(os.path.exists(os.path.join(dir_path, dirname))):
pass
else:
os.mkdir(os.path.join(dir_path, dirname))
os.mkdir(os.path.join(dir_path, dirname,"results"))
os.mkdir(os.path.join(dir_path, dirname,"inputs"))
#Extracting edges using Canny's Edge Detection
edges = cv2.Canny(img,80,255)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','edges.png'),edges)
kernel = (3,3)
#Applying image pyramid technique
#Applying Gaussian blur filter over the image
gaussian_blurred_image =self.gaussian_blurring(img,kernel,0)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','gaussian_blurred_image.png'),gaussian_blurred_image)
plt.subplot(121),
plt.xticks([]), plt.yticks([])
plt.subplot(122),
plt.xticks([]), plt.yticks([])
#Downsizing the image to 1/4th of its original size
coarse_image =self.sampling(gaussian_blurred_image,0.25,0.25)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','coarse_image.png'),coarse_image)
#Upsampling the image to its original size
up_sampling=self.sampling(coarse_image,4,4)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','up_sampling.png'),up_sampling)
#Applying Gaussian Blur filtering
gaus=self.gaussian_blurring(up_sampling,kernel,0)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','gaus2.png'),gaus)
#Resizing the image for image subtraction
gaussian_blurred_image=cv2.resize(img,(gaus.shape[1],gaus.shape[0]))
#Convert into grayscale
gaus_gray=cv2.cvtColor(gaus,cv2.COLOR_BGR2GRAY)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','gausgray.png'),gaus_gray)
#Converting to grayscale
dst_gray=cv2.cvtColor(gaussian_blurred_image,cv2.COLOR_BGR2GRAY)
(score, diff) = compare_ssim(gaus_gray, dst_gray, full=True)
diff = (diff * 255).astype("uint8")
#Image Subtraction
detail_image = cv2.subtract(gaus,gaussian_blurred_image)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','detailed.png'),detail_image)
print(detail_image.shape)
output_path=self.process_imgdir(os.path.join(dir_path, dirname))
dehazed_image=cv2.imread(output_path)
# dehazed_image =self.sampling(dehazed_image,4,4)
output_path="\\".join(output_path.split("\\")[:-1])
print(dehazed_image.shape)
cv2.imwrite(os.path.join(output_path,'dehazed_image.png'),dehazed_image)
#Adding two images
dst = cv2.addWeighted(detail_image,1,dehazed_image,1,0)
kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
dst = cv2.filter2D(dst, -1, kernel)
#Converting images to lightness,chroma ,hue for increasing the brightness
lab= cv2.cvtColor(dst, cv2.COLOR_BGR2LAB)
l, a, b = cv2.split(lab)
#Applying CLAHE Algorithm for contrast amplification which is limited and to reduce the problem of noise amplification
clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
cl = clahe.apply(l)
limg = cv2.merge((cl,a,b))
#Convert back to rgb
final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
psf = np.ones((5, 5)) / 25
#Applying mean denoising filtering
dst=cv2.fastNlMeansDenoisingColored(final,None,10,10,7,21)
edges=cv2.cvtColor(edges,cv2.COLOR_GRAY2BGR)
print(edges.shape)
edges=cv2.resize(edges,(dst.shape[1],dst.shape[0]))
#Increasing the brightness of the image
hsv = cv2.cvtColor(dst, cv2.COLOR_BGR2HSV)
h,s,v=cv2.split(hsv)
value = 30 #whatever value you want to add
lim=255-value
s[s>lim]=255
s[s<lim]+=value
value1=30
lim1=255-value1
v[v>lim1]=255
v[v<lim1]+=value1
hsv = cv2.merge((h, s, v))
dst = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
#Writing the output file
dst = cv2.addWeighted(dst,1,edges,1,0)
cv2.imwrite(os.path.join(output_path,file_name+'.png'),dst)
#Resizing the file to compare it with other methods
resized = cv2.resize(dst, (256,256), interpolation = cv2.INTER_AREA)
cv2.imwrite(os.path.join(output_path,'result_resized.png'),resized)
if __name__=="__main__":
#Input file path
path="dataset\\1_1_0.90179.png"
#loading the file
img = cv2.imread(path)
print(img)
#Extracting file name
file_name=".".join(path.split("\\")[-1].split(".")[:-1])
#calling the function
ImageEnhancement().image_enhancement(img,file_name) | get_transmission | identifier_name |
image_pyramid.py |
import numpy as np
from matplotlib import pyplot as plt
import math
import cv2
import os
import shutil
import sys
from skimage.measure import compare_ssim
# from scipy.misc import imfilter
from skimage import color, data, restoration
from scipy.signal import convolve2d
from skimage.exposure import rescale_intensity
import numpy as np
import argparse
import cv2
# from PIL import Image
# from resizeimage import resizeimage
class ImageEnhancement:
def gaussian_blurring(self,input_image,kernel_size,sigma):
"""Get Guassian Blurred image.
@param input_image: The source image.
@param kernel_size: Size of the filter
@param sigma:Control variation around its mean value
@return The gaussian blurred image
"""
#Applying Gaussian Blur filter
output_image=cv2.GaussianBlur(input_image,kernel_size,sigma)
return output_image
def sampling(self,input_image,width,height):
"""Resizing the image
@param input_image: The source image.
@param width:Width of new image
@param height:Height of new image
@return The resized image
"""
#Resizing the image
output_image=cv2.resize(input_image,None,fx=width,fy=height)
return output_image
def get_dark_channel(self,img, *, size):
"""Get dark channel for an image.
@param img: The source image.
@param size: Patch size.
@return The dark channel of the image.
"""
#Extract the dark/hazy part from the image
minch = np.amin(img, axis=2)
box = cv2.getStructuringElement(cv2.MORPH_RECT, (size // 2, size // 2))
return cv2.erode(minch, box)
def get_atmospheric_light(self,img, *, size, percent):
"""Estimate atmospheric light for an image.
@param img: the source image.
@param size: Patch size for calculating the dark channel.
@param percent: Percentage of brightest pixels in the dark channel
considered for the estimation.
@return The estimated atmospheric light.
"""
#Get the atmospheric light factor from the image
m, n, _ = img.shape
flat_img = img.reshape(m * n, 3)
flat_dark = self.get_dark_channel(img, size=size).ravel()
count = math.ceil(m * n * percent / 100)
indices = np.argpartition(flat_dark, -count)[:-count]
return np.amax(np.take(flat_img, indices, axis=0), axis=0)
def get_transmission(self,img, atmosphere, *, size, omega, radius, epsilon):
"""Estimate transmission map of an image.
@param img: The source image.
@param atmosphere: The atmospheric light for the image.
@param omega: Factor to preserve minor amounts of haze [1].
@param radius: (default: 40) Radius for the guided filter [2].
@param epsilon: (default: 0.001) Epsilon for the guided filter [2].
@return The transmission map for the source image.
"""
#Get transmission map from the image
division = np.float64(img) / np.float64(atmosphere)
raw = (1 - omega * self.get_dark_channel(division, size=size)).astype(np.float32)
return cv2.ximgproc.guidedFilter(img, raw, radius, epsilon)
def get_scene_radiance(self, img,*,size=15,omega=0.95,trans_lb=0.1,percent=0.1,radius=40,epsilon=0.001):
"""Get recovered scene radiance for a hazy image.
@param img: The source image to be dehazed.
@param omega: (default: 0.95) Factor to preserve minor amounts of haze [1].
@param trans_lb: (default: 0.1) Lower bound for transmission [1].
@param size: (default: 15) Patch size for filtering etc [1].
@param percent: (default: 0.1) Percentage of pixels chosen to compute atmospheric light [1].
@param radius: (default: 40) Radius for the guided filter [2].
@param epsilon: (default: 0.001) Epsilon for the guided filter [2].
@return The final dehazed image.
"""
L=356
#Applying atmosheric scattering model on the image
atmosphere = self.get_atmospheric_light(img, size=size, percent=percent)
trans = self.get_transmission(img, atmosphere, size=size, omega=omega, radius=radius, epsilon=epsilon)
clamped = np.clip(trans, trans_lb, omega)[:, :, None]
img = np.float64(img)
return np.uint8(((img - atmosphere) / clamped + atmosphere).clip(0, L - 1))
def process_imgdir(self,imgdir):
"""Get haze free images in the directory
@param imgdir: The source image directory.
@return All the haze free images.
"""
#Write images into resultdir
resultdir = os.path.join(imgdir, 'results')
#Read images from input dir
inputdir = os.path.join(imgdir, 'inputs')
shutil.rmtree(resultdir)
os.mkdir(resultdir)
#Read files from input images
for fullname in os.listdir(inputdir):
filepath = os.path.join(inputdir, fullname)
if os.path.isfile(filepath):
basename = os.path.basename(filepath)
image = cv2.imread(filepath, cv2.IMREAD_COLOR)
if len(image.shape) == 3 and image.shape[2] == 3:
|
else:
sys.stderr.write('Skipping %s, not RGB' % basename)
continue
#Extract haze from the scene and then save the image
dehazed = self.get_scene_radiance(image)
cv2.imwrite(os.path.join(resultdir, basename), dehazed)
return os.path.join(resultdir, basename)
def image_enhancement(self,img,file_name):
"""Main function to call all the functions
@param img: Input image
@param file_name: The output file name
@return All the haze free images.
"""
#Creating output directory if it doesnt exist
dirname = 'output'
dir_path = os.path.dirname(os.path.realpath(__file__))
if(os.path.isdir(os.path.join(dir_path, dirname))):
if(os.path.exists(os.path.join(dir_path, dirname))):
pass
else:
os.mkdir(os.path.join(dir_path, dirname))
os.mkdir(os.path.join(dir_path, dirname,"results"))
os.mkdir(os.path.join(dir_path, dirname,"inputs"))
#Extracting edges using Canny's Edge Detection
edges = cv2.Canny(img,80,255)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','edges.png'),edges)
kernel = (3,3)
#Applying image pyramid technique
#Applying Gaussian blur filter over the image
gaussian_blurred_image =self.gaussian_blurring(img,kernel,0)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','gaussian_blurred_image.png'),gaussian_blurred_image)
plt.subplot(121),
plt.xticks([]), plt.yticks([])
plt.subplot(122),
plt.xticks([]), plt.yticks([])
#Downsizing the image to 1/4th of its original size
coarse_image =self.sampling(gaussian_blurred_image,0.25,0.25)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','coarse_image.png'),coarse_image)
#Upsampling the image to its original size
up_sampling=self.sampling(coarse_image,4,4)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','up_sampling.png'),up_sampling)
#Applying Gaussian Blur filtering
gaus=self.gaussian_blurring(up_sampling,kernel,0)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','gaus2.png'),gaus)
#Resizing the image for image subtraction
gaussian_blurred_image=cv2.resize(img,(gaus.shape[1],gaus.shape[0]))
#Convert into grayscale
gaus_gray=cv2.cvtColor(gaus,cv2.COLOR_BGR2GRAY)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','gausgray.png'),gaus_gray)
#Converting to grayscale
dst_gray=cv2.cvtColor(gaussian_blurred_image,cv2.COLOR_BGR2GRAY)
(score, diff) = compare_ssim(gaus_gray, dst_gray, full=True)
diff = (diff * 255).astype("uint8")
#Image Subtraction
detail_image = cv2.subtract(gaus,gaussian_blurred_image)
cv2.imwrite(os.path.join(dir_path, dirname,'inputs','detailed.png'),detail_image)
print(detail_image.shape)
output_path=self.process_imgdir(os.path.join(dir_path, dirname))
dehazed_image=cv2.imread(output_path)
# dehazed_image =self.sampling(dehazed_image,4,4)
output_path="\\".join(output_path.split("\\")[:-1])
print(dehazed_image.shape)
cv2.imwrite(os.path.join(output_path,'dehazed_image.png'),dehazed_image)
#Adding two images
dst = cv2.addWeighted(detail_image,1,dehazed_image,1,0)
kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
dst = cv2.filter2D(dst, -1, kernel)
#Converting images to lightness,chroma ,hue for increasing the brightness
lab= cv2.cvtColor(dst, cv2.COLOR_BGR2LAB)
l, a, b = cv2.split(lab)
#Applying CLAHE Algorithm for contrast amplification which is limited and to reduce the problem of noise amplification
clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
cl = clahe.apply(l)
limg = cv2.merge((cl,a,b))
#Convert back to rgb
final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
psf = np.ones((5, 5)) / 25
#Applying mean denoising filtering
dst=cv2.fastNlMeansDenoisingColored(final,None,10,10,7,21)
edges=cv2.cvtColor(edges,cv2.COLOR_GRAY2BGR)
print(edges.shape)
edges=cv2.resize(edges,(dst.shape[1],dst.shape[0]))
#Increasing the brightness of the image
hsv = cv2.cvtColor(dst, cv2.COLOR_BGR2HSV)
h,s,v=cv2.split(hsv)
value = 30 #whatever value you want to add
lim=255-value
s[s>lim]=255
s[s<lim]+=value
value1=30
lim1=255-value1
v[v>lim1]=255
v[v<lim1]+=value1
hsv = cv2.merge((h, s, v))
dst = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
#Writing the output file
dst = cv2.addWeighted(dst,1,edges,1,0)
cv2.imwrite(os.path.join(output_path,file_name+'.png'),dst)
#Resizing the file to compare it with other methods
resized = cv2.resize(dst, (256,256), interpolation = cv2.INTER_AREA)
cv2.imwrite(os.path.join(output_path,'result_resized.png'),resized)
if __name__=="__main__":
#Input file path
path="dataset\\1_1_0.90179.png"
#loading the file
img = cv2.imread(path)
print(img)
#Extracting file name
file_name=".".join(path.split("\\")[-1].split(".")[:-1])
#calling the function
ImageEnhancement().image_enhancement(img,file_name) | print('Processing %s ...' % basename) | conditional_block |
node.py | # Copyright 2018 IBM. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Objects for representing nodes in a GraphDef proto."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from copy import deepcopy
import numpy as np
import tensorflow as tf
from typing import Tuple, List, Iterable, Any
from pge import graph
from pge import tensor
__all__ = [
"Node",
"ImmutableNode",
"MutableNode",
]
class Node(object):
"""
Public API for interacting with graph nodes
"""
def __init__(self, g: 'graph.Graph', node_id: int, name: str,
op_name: str, outputs: List[tensor.Tensor],
device: str):
"""
This constructor should only be called by subclasses.
"""
self._graph = g
self._id = node_id
self._name = name
self._op_name = op_name
self._outputs = outputs
self._device = device
@property
def name(self):
"""
Returns:
Unique name of the operator that this Node represents
"""
return self._name
@property
def op_name(self):
"""
Returns:
Name of the TensorFlow op type that this Node represents
"""
return self._op_name
@property
def graph(self):
"""
Returns:
`pge.Graph` object representing the graph in which this Node resides.
"""
return self._graph
@property
def outputs(self):
"""
Returns:
Tuple (i.e. immutable list) of `pge.Tensor` objects representing the
current outputs of this node. Note that this tuple does not change if
the underlying node is mutable and gets edited.
"""
return tuple(self._outputs)
def output(self, index: int):
"""
Args:
index: Index of an output of the node
Returns:
The Tensor corresponding to the indicated output of the node
"""
return self._outputs[index]
@property
def inputs(self) -> Tuple[tensor.Tensor]:
"""
Returns:
Tuple (i.e. immutable list) of `pge.Tensor` objects representing the
current inputs of this node.
"""
raise NotImplementedError("This method should be implemented by "
"subclasses.")
@property
def control_inputs(self) -> Tuple['Node']:
"""
Returns:
Tuple (i.e. immutable list) of `pge.Node` objects representing the
nodes that have control edges to this node.
"""
raise NotImplementedError("This method should be implemented by "
"subclasses.")
@property
def device(self):
"""
Returns:
TensorFlow device placement string desribing where this node should be
placed, or None to specify use of the default device.
"""
return self._device
def to_node_def(self):
"""
Returns:
A copy of the contents of this node as a NodeDef proto. The returned
proto will *not* change if this node is changed after the call, and
vice versa.
"""
raise NotImplementedError("This method should be implemented by "
"subclasses.")
def get_attr(self, key: str) -> Any:
"""
Retrieve the value of an attribute by name.
Args:
key: Key under which the node's attribute is stored
Returns:
Current value of the attribute as an appropriate native Python type
(NOT a `tf.AttrValue` protobuf) or None if no value was found.
Raises:
ValueError if the indicated key does not have an attribute associated
with it.
"""
raise NotImplementedError("This method should be implemented by "
"subclasses.")
def get_attr_keys(self) -> Tuple[str]:
"""
Returns:
Tuple (immutable list) of the keys of all attributes currently present
in the node
"""
raise NotImplementedError("This method should be implemented by "
"subclasses.")
class ImmutableNode(Node):
"""
Wrapper for tf.NodeDef. Also maintains a pointer back to wrapper object for
the original graph.
"""
def __init__(self, g: 'graph.Graph', node_id: int, node_def: tf.NodeDef,
outputs_list: List[Tuple[tf.DType, tf.shape]]):
"""
Args:
g: pge.Graph object that represents the parent graph
node_id: Unique (within parent graph) integer identifier for this node
node_def: tf.NodeDef protobuf
outputs_list: List of (type, shape) pairs that describe the outputs of
this node
"""
Node.__init__(self, g, node_id=node_id, name=node_def.name,
op_name=node_def.op,
outputs=[tensor.Tensor(self, i, outputs_list[i][0],
outputs_list[i][1])
for i in range(len(outputs_list))],
device=node_def.device)
self._node_def = node_def
@Node.inputs.getter
def inputs(self) -> Tuple[tensor.Tensor]:
# Regenerate each time for now.
return tuple(_decode_inputs(self._node_def.input, self._graph))
@Node.control_inputs.getter
def control_inputs(self) -> Tuple[Node]:
# For now, regenerate every time
return tuple(_decode_control_inputs(self._node_def.input, self._graph))
def get_attr(self, key: str):
if key not in self._node_def.attr:
raise ValueError("Node {} does not have an attribute "
"under key '{}'".format(self, key))
return _attr_value_to_python_type(self._node_def.attr[key])
def get_attr_keys(self) -> Tuple[str]:
return tuple(self._node_def.attr)
def to_node_def(self):
return deepcopy(self._node_def)
class MutableNode(Node):
"""
Wrapper for a change to a graph that will add a node. Accumulates the
parameters of the node to be added and can produce an appropriate
tf.NodeDef protobuf on demand.
"""
def __init__(self, g: 'graph.Graph', node_id: int, name: str, op_name: str,
device: str = ""):
"""
This constructor should only be called from methods of the Graph
class.
Args:
g: The graph that this node is to be added to. The caller is
responsible for adding the node to the graph.
node_id: Unique (within the parent graph) integer identifier for the node
name: Name of the new node to add
op_name: Name of the operation that the new node will perform
device: TensorFlow device specification string indicating where this node
should be located. Default value of "" means "use the default device"
"""
Node.__init__(self, g, node_id=node_id, name=name,
op_name=op_name, outputs=[], device=device)
self._attributes = []
self._inputs = []
self._control_inputs = []
def add_attr(self, key: str, value):
"""Add a single attribute to the underlying NodeDef's attr list.
Args:
key: Name of the attribute. Must be unique.
value: Value to put in place for the attribute. Must be one of the
following types:
* tf.DType
* tf.TensorShape
"""
if key in self._attr_names():
raise ValueError("Already have an attribute called '{}'".format(key))
self._attributes.append((key, value))
def get_attr(self, key: str):
# self._attributes is a list of (key, value) pairs
matches = [p[1] for p in self._attributes if p[0] == key]
if 0 == len(matches):
raise ValueError("Node {} does not have an attribute "
"under key '{}'".format(self, key))
elif len(matches) > 1:
raise ValueError("Node {} has more than one attribute "
"under key '{}'".format(self, key))
ret = matches[0]
if isinstance(ret, tf.AttrValue):
return _attr_value_to_python_type(ret)
else:
return ret
def get_attr_keys(self) -> Tuple[str]:
|
def clear_attrs(self):
"""
Remove any attributes that are attached to this node.
"""
self._attributes.clear()
def _attr_names(self):
return [a[0] for a in self._attributes]
@Node.inputs.getter
def inputs(self) -> Tuple[tensor.Tensor]:
return tuple(self._inputs)
def set_inputs(self, new_inputs: Iterable[tensor.Tensor]):
"""
Set all inputs at once, removing anything that was there previously.
Args:
new_inputs: Iterable of `Tensor` objects in this node's parent graph
"""
for t in new_inputs:
if t.graph != self.graph:
raise ValueError("Tensor {} points to graph {}, but this node is in a "
"different graph {}".format(t, t.graph, self.graph))
self._inputs = list(new_inputs)
self._graph.increment_version_counter() # New edges added to graph
def set_control_inputs(self, new_control_inputs: Iterable[Node]):
"""
Set all control inputs at once, removing anything that was there
previously.
Args:
new_control_inputs: Iterable of `Node` objects in this node's parent graph
"""
self._control_inputs = list(new_control_inputs)
def set_outputs_from_pairs(self, new_outputs: Iterable[Tuple[tf.DType,
tf.shape]]):
"""
Set all outputs at once, removing anything that was there previously.
Note that information about outputs is not stored in the serialized graph.
When instantiating a serialized graph, TensorFlow will use its own shape
inference to infer the number, type, and shape of the operator's outputs.
Args:
new_outputs: Iterable of (dtype, shape) pairs that describe the outputs
"""
self._outputs = []
i = 0
for (dtype, shape) in new_outputs:
self._outputs.append(tensor.Tensor(self, i, dtype, shape))
i += 1
self._graph.increment_version_counter() # Just in case
def infer_outputs(self):
"""
Use TensorFlow's shape and dtype inference to determine the number of
outputs as well as their shapes and dtypes, based on the node's op type
string, its attribute values, and what inputs are connected to it.
Inference will only function properly if the currently-loaded version of
TensorFlow knows about the specified op type and the current
configuration of this op's inputs is compatible with the combination of
op type string and parameters.
Overwrites the previous value of the `outputs` property.
Raises:
TBD
"""
# TF lack a supported API for invoking shape inference directly,
# so we instantiate a dummy graph and create a dummy Operation object
temp_graph = tf.Graph()
with temp_graph.as_default():
input_placeholders = [tf.placeholder(shape=t.shape, dtype=t.dtype) for
t in self._inputs]
# See the docs for tf.Operation for important notes about the semantics
# of each arg to the following constructor.
dummy_op = tf.Operation(self.to_node_def(), temp_graph,
inputs=input_placeholders)
self.set_outputs_from_pairs([(o.dtype, o.shape)
for o in dummy_op.outputs])
# set_outputs_from_pairs() increments the version counter, so we don't
# need to. Also, we haven't added edges to the graph until these
# outputs are connected to another node's inputs.
def set_inputs_from_strings(self, new_inputs: Iterable[str],
set_control_inputs: bool = True):
"""
Set all input at once, converting TensorFlow string-format inputs into
`Tensor` objects. All nodes referenced in the input strings must be
present in the parent graph.
Args:
new_inputs: Input description strings in the format that they appear in a
`tf.NodeDef` protocol buffer.
set_control_inputs: If True, replace existing control inputs for this
node with any control inputs specified in the input strings.
Otherwise , this method will ignore any strings that describe control
inputs.
"""
self._inputs = _decode_inputs(new_inputs, self._graph)
if set_control_inputs:
self._control_inputs = _decode_control_inputs(new_inputs, self._graph)
self._graph.increment_version_counter() # New edges added to graph
@Node.control_inputs.getter
def control_inputs(self) -> Tuple[Node]:
return tuple(self._control_inputs)
def to_node_def(self):
ret = tf.NodeDef()
ret.name = self.name
ret.op = self.op_name
for input_tensor in self.inputs:
ret.input.append(input_tensor.name)
for control_input_node in self.control_inputs:
ret.input.append("^" + control_input_node.name)
ret.device = self.device
for (attr_name, attr_value) in self._attributes:
# Funky syntax for setting a field of a union in a protobuf
ret.attr[attr_name].CopyFrom(_python_type_to_attr_value(attr_value))
return ret
def set_device(self, device: str):
self._device = device
################################################################################
# Stuff below this line is private to this file.
def _canonicalize_output_name(name: str):
"""
Args:
name: Name for an op output as it would appear in the protocol buffer
representation of a an operator graph
Returns:
A name in the form "<op name>:<output index>"
"""
if ":" in name:
return name
else:
return name + ":0"
def _decode_inputs(inputs: Iterable[str], g: 'graph.Graph') -> List[
tensor.Tensor]:
"""
Extract and decode the inputs in a list of TensorFlow input specification
strings.
Skips over control inputs.
Args:
inputs: List of strings specifying data and/or control inputs,
as serialized in `tf.NodeDef` protocol buffers.
g: Reference to a `Graph` object that must have nodes corresponding
to all inputs in the inputs list.
Returns:
A list of `Tensor` objects corresponding to each of the specified inputs.
"""
# Input names in the protobuf take three forms:
# "^node_name" --> Control input from indicated node
# "node_name" --> Input from output number 0 of indicated node
# "node_name:ix" --> Input from output number <ix> of indicated node
# Start by filtering out the control inputs and turning "node_name" into
# "node_name:0".
input_names = [_canonicalize_output_name(n) for n in inputs
if not n.startswith("^")]
input_tensors = []
for name in input_names:
# Name is in form "node:output number"
node_name, output_ix_name = name.split(":")
output_ix = int(output_ix_name)
input_tensors.append(g[node_name].output(output_ix))
return input_tensors
def _decode_control_inputs(inputs: Iterable[str], g: 'graph.Graph') -> List[
Node]:
"""
Extract and decode the control inputs in a list of TensorFlow input
specification strings.
Skips data inputs.
Args:
inputs: List of strings specifying data and/or control inputs,
as serialized in `tf.NodeDef` protocol buffers.
g: Reference to a `Graph` object that must have nodes corresponding
to all inputs in the inputs list.
Returns:
A list of `Node` objects corresponding to each of the control inputs.
"""
# Control inputs start with "^". Skip everything else and strip off the
# leading caret character
control_input_names = [n[1:] for n in inputs if n.startswith("^")]
return [g[name] for name in control_input_names]
def _python_type_to_attr_value(value: Any) -> tf.AttrValue:
"""
Convert a Python object or scalar value to a TensorFlow `tf.AttrValue`
protocol buffer message.
Args:
value: Python object to be converted
Returns:
An AttrValue object that wraps the contents of `value` in the most
appropriate way available.
"""
# TODO(frreiss): Handle AttrValues that are lists
if isinstance(value, tf.AttrValue):
# TODO(frreiss): Should this case result in an error?
return value
# Scalar types, in the order they appear in the .proto file
elif isinstance(value, str):
return tf.AttrValue(s=tf.compat.as_bytes(value))
elif isinstance(value, int):
return tf.AttrValue(i=value)
elif isinstance(value, float):
return tf.AttrValue(f=value)
elif isinstance(value, bool):
return tf.AttrValue(b=value)
elif isinstance(value, tf.DType):
return tf.AttrValue(type=value.as_datatype_enum())
elif isinstance(value, tf.TensorShape):
return tf.AttrValue(shape=value.as_proto())
elif isinstance(value, np.ndarray):
return tf.AttrValue(tensor=tf.make_tensor_proto(values=value))
# TODO(frreiss): Populate the "func" and "placeholder" fields of the union
# here
else:
raise ValueError("Don't know how to convert a {} to "
"tf.AttrValue".format(type(value)))
def _attr_value_to_python_type(attr_value: tf.AttrValue) -> Any:
"""
Inverse of _python_type_to_attr_value().
Args:
attr_value: Protocol buffer version of a node's attribute value
Returns:
A Python object or built-in type corresponding to the field in
`attr_value` that is in use.
"""
# TODO(frreiss): Handle AttrValues that are lists
if attr_value.HasField("s"): # str
# TODO(frreiss): Should we return the binary value here?
return tf.compat.as_str(attr_value.s)
elif attr_value.HasField("i"): # int
return attr_value.i
elif attr_value.HasField("f"): # float
return attr_value.f
elif attr_value.HasField("b"): # bool
return attr_value.b
elif attr_value.HasField("type"): # DType
return tf.DType(attr_value.type)
elif attr_value.HasField("shape"): # TensorShape
# Undocumented behavior of public API: tf.TensorShape constructor accepts
# a TensorShapeProto.
return tf.TensorShape(attr_value.shape)
elif attr_value.HasField("tensor"): # TensorProto
return tf.make_ndarray(attr_value.tensor)
# TODO(frreiss): Convert the "func" and "placeholder" fields of the union
# here
else:
raise ValueError("Don't know how to convert AttrValue {} to "
"a Python object".format(attr_value))
| return tuple([p[0] for p in self._attributes]) | identifier_body |
node.py | # Copyright 2018 IBM. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Objects for representing nodes in a GraphDef proto."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from copy import deepcopy
import numpy as np
import tensorflow as tf
from typing import Tuple, List, Iterable, Any
from pge import graph
from pge import tensor
__all__ = [
"Node",
"ImmutableNode",
"MutableNode",
]
class Node(object):
"""
Public API for interacting with graph nodes
"""
def __init__(self, g: 'graph.Graph', node_id: int, name: str,
op_name: str, outputs: List[tensor.Tensor],
device: str):
"""
This constructor should only be called by subclasses.
"""
self._graph = g
self._id = node_id
self._name = name
self._op_name = op_name
self._outputs = outputs
self._device = device
@property
def name(self):
"""
Returns:
Unique name of the operator that this Node represents
"""
return self._name
@property
def op_name(self):
"""
Returns:
Name of the TensorFlow op type that this Node represents
"""
return self._op_name
@property
def graph(self):
"""
Returns:
`pge.Graph` object representing the graph in which this Node resides.
"""
return self._graph
@property
def outputs(self):
"""
Returns:
Tuple (i.e. immutable list) of `pge.Tensor` objects representing the
current outputs of this node. Note that this tuple does not change if
the underlying node is mutable and gets edited.
"""
return tuple(self._outputs)
def output(self, index: int):
"""
Args:
index: Index of an output of the node
Returns:
The Tensor corresponding to the indicated output of the node
"""
return self._outputs[index]
@property
def inputs(self) -> Tuple[tensor.Tensor]:
"""
Returns:
Tuple (i.e. immutable list) of `pge.Tensor` objects representing the
current inputs of this node.
"""
raise NotImplementedError("This method should be implemented by "
"subclasses.")
@property
def control_inputs(self) -> Tuple['Node']:
"""
Returns:
Tuple (i.e. immutable list) of `pge.Node` objects representing the
nodes that have control edges to this node.
"""
raise NotImplementedError("This method should be implemented by "
"subclasses.")
@property
def device(self):
"""
Returns:
TensorFlow device placement string desribing where this node should be
placed, or None to specify use of the default device.
"""
return self._device
def to_node_def(self):
"""
Returns:
A copy of the contents of this node as a NodeDef proto. The returned
proto will *not* change if this node is changed after the call, and
vice versa.
"""
raise NotImplementedError("This method should be implemented by "
"subclasses.")
def get_attr(self, key: str) -> Any:
"""
Retrieve the value of an attribute by name.
Args:
key: Key under which the node's attribute is stored
Returns:
Current value of the attribute as an appropriate native Python type
(NOT a `tf.AttrValue` protobuf) or None if no value was found.
Raises:
ValueError if the indicated key does not have an attribute associated
with it.
"""
raise NotImplementedError("This method should be implemented by "
"subclasses.")
def get_attr_keys(self) -> Tuple[str]:
"""
Returns:
Tuple (immutable list) of the keys of all attributes currently present
in the node
"""
raise NotImplementedError("This method should be implemented by "
"subclasses.")
class ImmutableNode(Node):
"""
Wrapper for tf.NodeDef. Also maintains a pointer back to wrapper object for
the original graph.
"""
def __init__(self, g: 'graph.Graph', node_id: int, node_def: tf.NodeDef,
outputs_list: List[Tuple[tf.DType, tf.shape]]):
"""
Args:
g: pge.Graph object that represents the parent graph
node_id: Unique (within parent graph) integer identifier for this node
node_def: tf.NodeDef protobuf
outputs_list: List of (type, shape) pairs that describe the outputs of
this node
"""
Node.__init__(self, g, node_id=node_id, name=node_def.name,
op_name=node_def.op,
outputs=[tensor.Tensor(self, i, outputs_list[i][0],
outputs_list[i][1])
for i in range(len(outputs_list))],
device=node_def.device)
self._node_def = node_def
@Node.inputs.getter
def inputs(self) -> Tuple[tensor.Tensor]:
# Regenerate each time for now.
return tuple(_decode_inputs(self._node_def.input, self._graph))
@Node.control_inputs.getter
def control_inputs(self) -> Tuple[Node]:
# For now, regenerate every time
return tuple(_decode_control_inputs(self._node_def.input, self._graph))
def get_attr(self, key: str):
if key not in self._node_def.attr:
raise ValueError("Node {} does not have an attribute "
"under key '{}'".format(self, key))
return _attr_value_to_python_type(self._node_def.attr[key])
def get_attr_keys(self) -> Tuple[str]:
return tuple(self._node_def.attr)
def to_node_def(self):
return deepcopy(self._node_def)
class MutableNode(Node):
"""
Wrapper for a change to a graph that will add a node. Accumulates the
parameters of the node to be added and can produce an appropriate
tf.NodeDef protobuf on demand.
"""
def __init__(self, g: 'graph.Graph', node_id: int, name: str, op_name: str,
device: str = ""):
"""
This constructor should only be called from methods of the Graph
class.
Args:
g: The graph that this node is to be added to. The caller is
responsible for adding the node to the graph.
node_id: Unique (within the parent graph) integer identifier for the node
name: Name of the new node to add
op_name: Name of the operation that the new node will perform
device: TensorFlow device specification string indicating where this node
should be located. Default value of "" means "use the default device"
"""
Node.__init__(self, g, node_id=node_id, name=name,
op_name=op_name, outputs=[], device=device)
self._attributes = []
self._inputs = []
self._control_inputs = []
def add_attr(self, key: str, value):
"""Add a single attribute to the underlying NodeDef's attr list.
Args:
key: Name of the attribute. Must be unique.
value: Value to put in place for the attribute. Must be one of the
following types:
* tf.DType
* tf.TensorShape
"""
if key in self._attr_names():
raise ValueError("Already have an attribute called '{}'".format(key))
self._attributes.append((key, value))
def get_attr(self, key: str):
# self._attributes is a list of (key, value) pairs
matches = [p[1] for p in self._attributes if p[0] == key]
if 0 == len(matches):
raise ValueError("Node {} does not have an attribute "
"under key '{}'".format(self, key))
elif len(matches) > 1:
raise ValueError("Node {} has more than one attribute "
"under key '{}'".format(self, key))
ret = matches[0]
if isinstance(ret, tf.AttrValue):
return _attr_value_to_python_type(ret)
else:
return ret
def get_attr_keys(self) -> Tuple[str]:
return tuple([p[0] for p in self._attributes])
def clear_attrs(self):
"""
Remove any attributes that are attached to this node.
"""
self._attributes.clear()
def _attr_names(self):
return [a[0] for a in self._attributes]
@Node.inputs.getter
def inputs(self) -> Tuple[tensor.Tensor]:
return tuple(self._inputs)
def set_inputs(self, new_inputs: Iterable[tensor.Tensor]):
"""
Set all inputs at once, removing anything that was there previously.
Args:
new_inputs: Iterable of `Tensor` objects in this node's parent graph
"""
for t in new_inputs:
if t.graph != self.graph:
raise ValueError("Tensor {} points to graph {}, but this node is in a "
"different graph {}".format(t, t.graph, self.graph))
self._inputs = list(new_inputs)
self._graph.increment_version_counter() # New edges added to graph
def set_control_inputs(self, new_control_inputs: Iterable[Node]):
"""
Set all control inputs at once, removing anything that was there
previously.
Args:
new_control_inputs: Iterable of `Node` objects in this node's parent graph
"""
self._control_inputs = list(new_control_inputs)
def set_outputs_from_pairs(self, new_outputs: Iterable[Tuple[tf.DType,
tf.shape]]):
"""
Set all outputs at once, removing anything that was there previously.
Note that information about outputs is not stored in the serialized graph.
When instantiating a serialized graph, TensorFlow will use its own shape
inference to infer the number, type, and shape of the operator's outputs.
Args:
new_outputs: Iterable of (dtype, shape) pairs that describe the outputs
"""
self._outputs = []
i = 0
for (dtype, shape) in new_outputs:
self._outputs.append(tensor.Tensor(self, i, dtype, shape))
i += 1
self._graph.increment_version_counter() # Just in case
def infer_outputs(self):
"""
Use TensorFlow's shape and dtype inference to determine the number of
outputs as well as their shapes and dtypes, based on the node's op type
string, its attribute values, and what inputs are connected to it.
Inference will only function properly if the currently-loaded version of
TensorFlow knows about the specified op type and the current
configuration of this op's inputs is compatible with the combination of
op type string and parameters.
Overwrites the previous value of the `outputs` property.
Raises:
TBD
"""
# TF lack a supported API for invoking shape inference directly,
# so we instantiate a dummy graph and create a dummy Operation object
temp_graph = tf.Graph()
with temp_graph.as_default():
input_placeholders = [tf.placeholder(shape=t.shape, dtype=t.dtype) for
t in self._inputs]
# See the docs for tf.Operation for important notes about the semantics
# of each arg to the following constructor.
dummy_op = tf.Operation(self.to_node_def(), temp_graph,
inputs=input_placeholders)
self.set_outputs_from_pairs([(o.dtype, o.shape)
for o in dummy_op.outputs])
# set_outputs_from_pairs() increments the version counter, so we don't
# need to. Also, we haven't added edges to the graph until these
# outputs are connected to another node's inputs.
def set_inputs_from_strings(self, new_inputs: Iterable[str],
set_control_inputs: bool = True):
"""
Set all input at once, converting TensorFlow string-format inputs into
`Tensor` objects. All nodes referenced in the input strings must be
present in the parent graph.
Args:
new_inputs: Input description strings in the format that they appear in a
`tf.NodeDef` protocol buffer.
set_control_inputs: If True, replace existing control inputs for this
node with any control inputs specified in the input strings.
Otherwise , this method will ignore any strings that describe control
inputs.
"""
self._inputs = _decode_inputs(new_inputs, self._graph)
if set_control_inputs:
self._control_inputs = _decode_control_inputs(new_inputs, self._graph)
self._graph.increment_version_counter() # New edges added to graph
@Node.control_inputs.getter
def control_inputs(self) -> Tuple[Node]:
return tuple(self._control_inputs)
def | (self):
ret = tf.NodeDef()
ret.name = self.name
ret.op = self.op_name
for input_tensor in self.inputs:
ret.input.append(input_tensor.name)
for control_input_node in self.control_inputs:
ret.input.append("^" + control_input_node.name)
ret.device = self.device
for (attr_name, attr_value) in self._attributes:
# Funky syntax for setting a field of a union in a protobuf
ret.attr[attr_name].CopyFrom(_python_type_to_attr_value(attr_value))
return ret
def set_device(self, device: str):
self._device = device
################################################################################
# Stuff below this line is private to this file.
def _canonicalize_output_name(name: str):
"""
Args:
name: Name for an op output as it would appear in the protocol buffer
representation of a an operator graph
Returns:
A name in the form "<op name>:<output index>"
"""
if ":" in name:
return name
else:
return name + ":0"
def _decode_inputs(inputs: Iterable[str], g: 'graph.Graph') -> List[
tensor.Tensor]:
"""
Extract and decode the inputs in a list of TensorFlow input specification
strings.
Skips over control inputs.
Args:
inputs: List of strings specifying data and/or control inputs,
as serialized in `tf.NodeDef` protocol buffers.
g: Reference to a `Graph` object that must have nodes corresponding
to all inputs in the inputs list.
Returns:
A list of `Tensor` objects corresponding to each of the specified inputs.
"""
# Input names in the protobuf take three forms:
# "^node_name" --> Control input from indicated node
# "node_name" --> Input from output number 0 of indicated node
# "node_name:ix" --> Input from output number <ix> of indicated node
# Start by filtering out the control inputs and turning "node_name" into
# "node_name:0".
input_names = [_canonicalize_output_name(n) for n in inputs
if not n.startswith("^")]
input_tensors = []
for name in input_names:
# Name is in form "node:output number"
node_name, output_ix_name = name.split(":")
output_ix = int(output_ix_name)
input_tensors.append(g[node_name].output(output_ix))
return input_tensors
def _decode_control_inputs(inputs: Iterable[str], g: 'graph.Graph') -> List[
Node]:
"""
Extract and decode the control inputs in a list of TensorFlow input
specification strings.
Skips data inputs.
Args:
inputs: List of strings specifying data and/or control inputs,
as serialized in `tf.NodeDef` protocol buffers.
g: Reference to a `Graph` object that must have nodes corresponding
to all inputs in the inputs list.
Returns:
A list of `Node` objects corresponding to each of the control inputs.
"""
# Control inputs start with "^". Skip everything else and strip off the
# leading caret character
control_input_names = [n[1:] for n in inputs if n.startswith("^")]
return [g[name] for name in control_input_names]
def _python_type_to_attr_value(value: Any) -> tf.AttrValue:
"""
Convert a Python object or scalar value to a TensorFlow `tf.AttrValue`
protocol buffer message.
Args:
value: Python object to be converted
Returns:
An AttrValue object that wraps the contents of `value` in the most
appropriate way available.
"""
# TODO(frreiss): Handle AttrValues that are lists
if isinstance(value, tf.AttrValue):
# TODO(frreiss): Should this case result in an error?
return value
# Scalar types, in the order they appear in the .proto file
elif isinstance(value, str):
return tf.AttrValue(s=tf.compat.as_bytes(value))
elif isinstance(value, int):
return tf.AttrValue(i=value)
elif isinstance(value, float):
return tf.AttrValue(f=value)
elif isinstance(value, bool):
return tf.AttrValue(b=value)
elif isinstance(value, tf.DType):
return tf.AttrValue(type=value.as_datatype_enum())
elif isinstance(value, tf.TensorShape):
return tf.AttrValue(shape=value.as_proto())
elif isinstance(value, np.ndarray):
return tf.AttrValue(tensor=tf.make_tensor_proto(values=value))
# TODO(frreiss): Populate the "func" and "placeholder" fields of the union
# here
else:
raise ValueError("Don't know how to convert a {} to "
"tf.AttrValue".format(type(value)))
def _attr_value_to_python_type(attr_value: tf.AttrValue) -> Any:
"""
Inverse of _python_type_to_attr_value().
Args:
attr_value: Protocol buffer version of a node's attribute value
Returns:
A Python object or built-in type corresponding to the field in
`attr_value` that is in use.
"""
# TODO(frreiss): Handle AttrValues that are lists
if attr_value.HasField("s"): # str
# TODO(frreiss): Should we return the binary value here?
return tf.compat.as_str(attr_value.s)
elif attr_value.HasField("i"): # int
return attr_value.i
elif attr_value.HasField("f"): # float
return attr_value.f
elif attr_value.HasField("b"): # bool
return attr_value.b
elif attr_value.HasField("type"): # DType
return tf.DType(attr_value.type)
elif attr_value.HasField("shape"): # TensorShape
# Undocumented behavior of public API: tf.TensorShape constructor accepts
# a TensorShapeProto.
return tf.TensorShape(attr_value.shape)
elif attr_value.HasField("tensor"): # TensorProto
return tf.make_ndarray(attr_value.tensor)
# TODO(frreiss): Convert the "func" and "placeholder" fields of the union
# here
else:
raise ValueError("Don't know how to convert AttrValue {} to "
"a Python object".format(attr_value))
| to_node_def | identifier_name |
node.py | # Copyright 2018 IBM. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Objects for representing nodes in a GraphDef proto."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from copy import deepcopy
import numpy as np
import tensorflow as tf
from typing import Tuple, List, Iterable, Any
from pge import graph
from pge import tensor
__all__ = [
"Node",
"ImmutableNode",
"MutableNode",
]
class Node(object):
"""
Public API for interacting with graph nodes
"""
def __init__(self, g: 'graph.Graph', node_id: int, name: str,
op_name: str, outputs: List[tensor.Tensor],
device: str):
"""
This constructor should only be called by subclasses.
"""
self._graph = g
self._id = node_id
self._name = name
self._op_name = op_name
self._outputs = outputs
self._device = device
@property
def name(self):
"""
Returns:
Unique name of the operator that this Node represents
"""
return self._name
@property
def op_name(self):
"""
Returns:
Name of the TensorFlow op type that this Node represents
"""
return self._op_name
@property
def graph(self):
"""
Returns:
`pge.Graph` object representing the graph in which this Node resides.
"""
return self._graph
@property
def outputs(self):
"""
Returns:
Tuple (i.e. immutable list) of `pge.Tensor` objects representing the
current outputs of this node. Note that this tuple does not change if
the underlying node is mutable and gets edited.
"""
return tuple(self._outputs)
def output(self, index: int):
"""
Args:
index: Index of an output of the node
Returns:
The Tensor corresponding to the indicated output of the node
"""
return self._outputs[index]
@property
def inputs(self) -> Tuple[tensor.Tensor]:
"""
Returns:
Tuple (i.e. immutable list) of `pge.Tensor` objects representing the
current inputs of this node.
"""
raise NotImplementedError("This method should be implemented by "
"subclasses.")
@property
def control_inputs(self) -> Tuple['Node']:
"""
Returns:
Tuple (i.e. immutable list) of `pge.Node` objects representing the
nodes that have control edges to this node.
"""
raise NotImplementedError("This method should be implemented by "
"subclasses.")
@property
def device(self):
"""
Returns:
TensorFlow device placement string desribing where this node should be
placed, or None to specify use of the default device.
"""
return self._device
def to_node_def(self):
"""
Returns:
A copy of the contents of this node as a NodeDef proto. The returned
proto will *not* change if this node is changed after the call, and
vice versa.
"""
raise NotImplementedError("This method should be implemented by "
"subclasses.")
def get_attr(self, key: str) -> Any:
"""
Retrieve the value of an attribute by name.
Args:
key: Key under which the node's attribute is stored
Returns:
Current value of the attribute as an appropriate native Python type
(NOT a `tf.AttrValue` protobuf) or None if no value was found.
Raises:
ValueError if the indicated key does not have an attribute associated
with it.
"""
raise NotImplementedError("This method should be implemented by "
"subclasses.")
def get_attr_keys(self) -> Tuple[str]:
"""
Returns:
Tuple (immutable list) of the keys of all attributes currently present
in the node
"""
raise NotImplementedError("This method should be implemented by "
"subclasses.")
class ImmutableNode(Node):
"""
Wrapper for tf.NodeDef. Also maintains a pointer back to wrapper object for
the original graph.
"""
def __init__(self, g: 'graph.Graph', node_id: int, node_def: tf.NodeDef,
outputs_list: List[Tuple[tf.DType, tf.shape]]):
"""
Args:
g: pge.Graph object that represents the parent graph
node_id: Unique (within parent graph) integer identifier for this node
node_def: tf.NodeDef protobuf
outputs_list: List of (type, shape) pairs that describe the outputs of
this node
"""
Node.__init__(self, g, node_id=node_id, name=node_def.name,
op_name=node_def.op,
outputs=[tensor.Tensor(self, i, outputs_list[i][0],
outputs_list[i][1])
for i in range(len(outputs_list))],
device=node_def.device)
self._node_def = node_def
@Node.inputs.getter
def inputs(self) -> Tuple[tensor.Tensor]:
# Regenerate each time for now.
return tuple(_decode_inputs(self._node_def.input, self._graph))
@Node.control_inputs.getter
def control_inputs(self) -> Tuple[Node]:
# For now, regenerate every time
return tuple(_decode_control_inputs(self._node_def.input, self._graph))
def get_attr(self, key: str):
if key not in self._node_def.attr:
raise ValueError("Node {} does not have an attribute "
"under key '{}'".format(self, key))
return _attr_value_to_python_type(self._node_def.attr[key])
def get_attr_keys(self) -> Tuple[str]:
return tuple(self._node_def.attr)
def to_node_def(self):
return deepcopy(self._node_def)
class MutableNode(Node):
"""
Wrapper for a change to a graph that will add a node. Accumulates the
parameters of the node to be added and can produce an appropriate
tf.NodeDef protobuf on demand.
"""
def __init__(self, g: 'graph.Graph', node_id: int, name: str, op_name: str,
device: str = ""):
"""
This constructor should only be called from methods of the Graph
class.
Args:
g: The graph that this node is to be added to. The caller is
responsible for adding the node to the graph.
node_id: Unique (within the parent graph) integer identifier for the node
name: Name of the new node to add
op_name: Name of the operation that the new node will perform
device: TensorFlow device specification string indicating where this node
should be located. Default value of "" means "use the default device"
"""
Node.__init__(self, g, node_id=node_id, name=name,
op_name=op_name, outputs=[], device=device)
self._attributes = []
self._inputs = []
self._control_inputs = []
def add_attr(self, key: str, value):
"""Add a single attribute to the underlying NodeDef's attr list.
Args:
key: Name of the attribute. Must be unique.
value: Value to put in place for the attribute. Must be one of the
following types:
* tf.DType
* tf.TensorShape
"""
if key in self._attr_names():
raise ValueError("Already have an attribute called '{}'".format(key))
self._attributes.append((key, value))
def get_attr(self, key: str):
# self._attributes is a list of (key, value) pairs
matches = [p[1] for p in self._attributes if p[0] == key]
if 0 == len(matches):
raise ValueError("Node {} does not have an attribute "
"under key '{}'".format(self, key))
elif len(matches) > 1:
raise ValueError("Node {} has more than one attribute "
"under key '{}'".format(self, key))
ret = matches[0]
if isinstance(ret, tf.AttrValue):
return _attr_value_to_python_type(ret)
else:
return ret
def get_attr_keys(self) -> Tuple[str]:
return tuple([p[0] for p in self._attributes])
def clear_attrs(self):
"""
Remove any attributes that are attached to this node.
"""
self._attributes.clear()
def _attr_names(self):
return [a[0] for a in self._attributes]
@Node.inputs.getter
def inputs(self) -> Tuple[tensor.Tensor]:
return tuple(self._inputs)
def set_inputs(self, new_inputs: Iterable[tensor.Tensor]):
"""
Set all inputs at once, removing anything that was there previously.
Args:
new_inputs: Iterable of `Tensor` objects in this node's parent graph
"""
for t in new_inputs:
if t.graph != self.graph:
raise ValueError("Tensor {} points to graph {}, but this node is in a "
"different graph {}".format(t, t.graph, self.graph))
self._inputs = list(new_inputs)
self._graph.increment_version_counter() # New edges added to graph
def set_control_inputs(self, new_control_inputs: Iterable[Node]):
"""
Set all control inputs at once, removing anything that was there
previously.
Args:
new_control_inputs: Iterable of `Node` objects in this node's parent graph
"""
self._control_inputs = list(new_control_inputs)
def set_outputs_from_pairs(self, new_outputs: Iterable[Tuple[tf.DType,
tf.shape]]):
"""
Set all outputs at once, removing anything that was there previously.
Note that information about outputs is not stored in the serialized graph.
When instantiating a serialized graph, TensorFlow will use its own shape
inference to infer the number, type, and shape of the operator's outputs.
Args:
new_outputs: Iterable of (dtype, shape) pairs that describe the outputs
"""
self._outputs = []
i = 0
for (dtype, shape) in new_outputs:
self._outputs.append(tensor.Tensor(self, i, dtype, shape))
i += 1
self._graph.increment_version_counter() # Just in case
def infer_outputs(self):
"""
Use TensorFlow's shape and dtype inference to determine the number of
outputs as well as their shapes and dtypes, based on the node's op type
string, its attribute values, and what inputs are connected to it.
Inference will only function properly if the currently-loaded version of
TensorFlow knows about the specified op type and the current
configuration of this op's inputs is compatible with the combination of
op type string and parameters.
Overwrites the previous value of the `outputs` property.
Raises:
TBD
"""
# TF lack a supported API for invoking shape inference directly,
# so we instantiate a dummy graph and create a dummy Operation object
temp_graph = tf.Graph()
with temp_graph.as_default():
input_placeholders = [tf.placeholder(shape=t.shape, dtype=t.dtype) for
t in self._inputs]
# See the docs for tf.Operation for important notes about the semantics
# of each arg to the following constructor.
dummy_op = tf.Operation(self.to_node_def(), temp_graph,
inputs=input_placeholders)
self.set_outputs_from_pairs([(o.dtype, o.shape)
for o in dummy_op.outputs])
# set_outputs_from_pairs() increments the version counter, so we don't
# need to. Also, we haven't added edges to the graph until these
# outputs are connected to another node's inputs.
def set_inputs_from_strings(self, new_inputs: Iterable[str],
set_control_inputs: bool = True):
"""
Set all input at once, converting TensorFlow string-format inputs into
`Tensor` objects. All nodes referenced in the input strings must be
present in the parent graph.
Args:
new_inputs: Input description strings in the format that they appear in a
`tf.NodeDef` protocol buffer.
set_control_inputs: If True, replace existing control inputs for this
node with any control inputs specified in the input strings.
Otherwise , this method will ignore any strings that describe control
inputs.
"""
self._inputs = _decode_inputs(new_inputs, self._graph)
if set_control_inputs:
self._control_inputs = _decode_control_inputs(new_inputs, self._graph)
self._graph.increment_version_counter() # New edges added to graph
@Node.control_inputs.getter
def control_inputs(self) -> Tuple[Node]:
return tuple(self._control_inputs)
def to_node_def(self):
ret = tf.NodeDef()
ret.name = self.name
ret.op = self.op_name
for input_tensor in self.inputs:
ret.input.append(input_tensor.name)
for control_input_node in self.control_inputs:
ret.input.append("^" + control_input_node.name)
ret.device = self.device
for (attr_name, attr_value) in self._attributes:
# Funky syntax for setting a field of a union in a protobuf
ret.attr[attr_name].CopyFrom(_python_type_to_attr_value(attr_value))
return ret
def set_device(self, device: str):
self._device = device
################################################################################
# Stuff below this line is private to this file.
def _canonicalize_output_name(name: str):
"""
Args:
name: Name for an op output as it would appear in the protocol buffer
representation of a an operator graph
Returns:
A name in the form "<op name>:<output index>"
"""
if ":" in name:
return name
else:
return name + ":0"
def _decode_inputs(inputs: Iterable[str], g: 'graph.Graph') -> List[
tensor.Tensor]:
"""
Extract and decode the inputs in a list of TensorFlow input specification
strings.
Skips over control inputs.
Args:
inputs: List of strings specifying data and/or control inputs,
as serialized in `tf.NodeDef` protocol buffers.
g: Reference to a `Graph` object that must have nodes corresponding
to all inputs in the inputs list.
Returns:
A list of `Tensor` objects corresponding to each of the specified inputs.
"""
# Input names in the protobuf take three forms:
# "^node_name" --> Control input from indicated node
# "node_name" --> Input from output number 0 of indicated node
# "node_name:ix" --> Input from output number <ix> of indicated node
# Start by filtering out the control inputs and turning "node_name" into
# "node_name:0".
input_names = [_canonicalize_output_name(n) for n in inputs
if not n.startswith("^")]
input_tensors = []
for name in input_names:
# Name is in form "node:output number"
node_name, output_ix_name = name.split(":")
output_ix = int(output_ix_name)
input_tensors.append(g[node_name].output(output_ix))
return input_tensors
def _decode_control_inputs(inputs: Iterable[str], g: 'graph.Graph') -> List[
Node]:
"""
Extract and decode the control inputs in a list of TensorFlow input
specification strings.
Skips data inputs.
Args:
inputs: List of strings specifying data and/or control inputs,
as serialized in `tf.NodeDef` protocol buffers.
g: Reference to a `Graph` object that must have nodes corresponding
to all inputs in the inputs list.
Returns:
A list of `Node` objects corresponding to each of the control inputs.
"""
# Control inputs start with "^". Skip everything else and strip off the
# leading caret character
control_input_names = [n[1:] for n in inputs if n.startswith("^")]
return [g[name] for name in control_input_names]
def _python_type_to_attr_value(value: Any) -> tf.AttrValue:
"""
Convert a Python object or scalar value to a TensorFlow `tf.AttrValue`
protocol buffer message.
Args:
value: Python object to be converted
Returns:
An AttrValue object that wraps the contents of `value` in the most
appropriate way available.
"""
# TODO(frreiss): Handle AttrValues that are lists
if isinstance(value, tf.AttrValue):
# TODO(frreiss): Should this case result in an error?
return value
# Scalar types, in the order they appear in the .proto file
elif isinstance(value, str):
return tf.AttrValue(s=tf.compat.as_bytes(value))
elif isinstance(value, int):
return tf.AttrValue(i=value)
elif isinstance(value, float):
return tf.AttrValue(f=value)
elif isinstance(value, bool):
return tf.AttrValue(b=value)
elif isinstance(value, tf.DType):
return tf.AttrValue(type=value.as_datatype_enum())
elif isinstance(value, tf.TensorShape):
return tf.AttrValue(shape=value.as_proto())
elif isinstance(value, np.ndarray):
return tf.AttrValue(tensor=tf.make_tensor_proto(values=value))
# TODO(frreiss): Populate the "func" and "placeholder" fields of the union
# here
else:
raise ValueError("Don't know how to convert a {} to "
"tf.AttrValue".format(type(value)))
def _attr_value_to_python_type(attr_value: tf.AttrValue) -> Any:
"""
Inverse of _python_type_to_attr_value().
Args:
attr_value: Protocol buffer version of a node's attribute value
Returns:
A Python object or built-in type corresponding to the field in
`attr_value` that is in use.
"""
# TODO(frreiss): Handle AttrValues that are lists
if attr_value.HasField("s"): # str
# TODO(frreiss): Should we return the binary value here?
return tf.compat.as_str(attr_value.s)
elif attr_value.HasField("i"): # int
return attr_value.i
elif attr_value.HasField("f"): # float
return attr_value.f
elif attr_value.HasField("b"): # bool
return attr_value.b
elif attr_value.HasField("type"): # DType
|
elif attr_value.HasField("shape"): # TensorShape
# Undocumented behavior of public API: tf.TensorShape constructor accepts
# a TensorShapeProto.
return tf.TensorShape(attr_value.shape)
elif attr_value.HasField("tensor"): # TensorProto
return tf.make_ndarray(attr_value.tensor)
# TODO(frreiss): Convert the "func" and "placeholder" fields of the union
# here
else:
raise ValueError("Don't know how to convert AttrValue {} to "
"a Python object".format(attr_value))
| return tf.DType(attr_value.type) | conditional_block |
node.py | # Copyright 2018 IBM. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Objects for representing nodes in a GraphDef proto."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from copy import deepcopy
import numpy as np
import tensorflow as tf
from typing import Tuple, List, Iterable, Any
from pge import graph
from pge import tensor
__all__ = [
"Node",
"ImmutableNode",
"MutableNode",
]
class Node(object):
"""
Public API for interacting with graph nodes
"""
def __init__(self, g: 'graph.Graph', node_id: int, name: str,
op_name: str, outputs: List[tensor.Tensor],
device: str):
"""
This constructor should only be called by subclasses.
"""
self._graph = g
self._id = node_id
self._name = name
self._op_name = op_name
self._outputs = outputs
self._device = device
@property
def name(self):
"""
Returns:
Unique name of the operator that this Node represents
"""
return self._name
@property
def op_name(self):
"""
Returns:
Name of the TensorFlow op type that this Node represents
"""
return self._op_name
@property
def graph(self):
"""
Returns:
`pge.Graph` object representing the graph in which this Node resides.
"""
return self._graph
@property
def outputs(self):
"""
Returns:
Tuple (i.e. immutable list) of `pge.Tensor` objects representing the
current outputs of this node. Note that this tuple does not change if
the underlying node is mutable and gets edited.
"""
return tuple(self._outputs)
def output(self, index: int):
"""
Args:
index: Index of an output of the node
Returns:
The Tensor corresponding to the indicated output of the node
"""
return self._outputs[index]
@property
def inputs(self) -> Tuple[tensor.Tensor]:
"""
Returns:
Tuple (i.e. immutable list) of `pge.Tensor` objects representing the
current inputs of this node.
"""
raise NotImplementedError("This method should be implemented by "
"subclasses.")
@property
def control_inputs(self) -> Tuple['Node']:
"""
Returns:
Tuple (i.e. immutable list) of `pge.Node` objects representing the
nodes that have control edges to this node.
"""
raise NotImplementedError("This method should be implemented by "
"subclasses.")
@property
def device(self):
"""
Returns:
TensorFlow device placement string desribing where this node should be
placed, or None to specify use of the default device.
"""
return self._device
def to_node_def(self):
"""
Returns:
A copy of the contents of this node as a NodeDef proto. The returned
proto will *not* change if this node is changed after the call, and
vice versa.
"""
raise NotImplementedError("This method should be implemented by "
"subclasses.")
def get_attr(self, key: str) -> Any:
"""
Retrieve the value of an attribute by name.
Args:
key: Key under which the node's attribute is stored
Returns:
Current value of the attribute as an appropriate native Python type
(NOT a `tf.AttrValue` protobuf) or None if no value was found.
Raises:
ValueError if the indicated key does not have an attribute associated
with it.
"""
raise NotImplementedError("This method should be implemented by "
"subclasses.")
def get_attr_keys(self) -> Tuple[str]:
"""
Returns:
Tuple (immutable list) of the keys of all attributes currently present
in the node
"""
raise NotImplementedError("This method should be implemented by "
"subclasses.")
class ImmutableNode(Node):
"""
Wrapper for tf.NodeDef. Also maintains a pointer back to wrapper object for
the original graph.
"""
def __init__(self, g: 'graph.Graph', node_id: int, node_def: tf.NodeDef,
outputs_list: List[Tuple[tf.DType, tf.shape]]):
"""
Args:
g: pge.Graph object that represents the parent graph
node_id: Unique (within parent graph) integer identifier for this node
node_def: tf.NodeDef protobuf
outputs_list: List of (type, shape) pairs that describe the outputs of
this node
"""
Node.__init__(self, g, node_id=node_id, name=node_def.name,
op_name=node_def.op,
outputs=[tensor.Tensor(self, i, outputs_list[i][0],
outputs_list[i][1])
for i in range(len(outputs_list))],
device=node_def.device)
self._node_def = node_def
@Node.inputs.getter
def inputs(self) -> Tuple[tensor.Tensor]:
# Regenerate each time for now.
return tuple(_decode_inputs(self._node_def.input, self._graph))
@Node.control_inputs.getter
def control_inputs(self) -> Tuple[Node]:
# For now, regenerate every time
return tuple(_decode_control_inputs(self._node_def.input, self._graph))
def get_attr(self, key: str):
if key not in self._node_def.attr:
raise ValueError("Node {} does not have an attribute "
"under key '{}'".format(self, key))
return _attr_value_to_python_type(self._node_def.attr[key])
def get_attr_keys(self) -> Tuple[str]:
return tuple(self._node_def.attr)
def to_node_def(self):
return deepcopy(self._node_def)
class MutableNode(Node):
"""
Wrapper for a change to a graph that will add a node. Accumulates the
parameters of the node to be added and can produce an appropriate
tf.NodeDef protobuf on demand.
"""
def __init__(self, g: 'graph.Graph', node_id: int, name: str, op_name: str,
device: str = ""):
"""
This constructor should only be called from methods of the Graph
class.
Args:
g: The graph that this node is to be added to. The caller is
responsible for adding the node to the graph.
node_id: Unique (within the parent graph) integer identifier for the node
name: Name of the new node to add
op_name: Name of the operation that the new node will perform
device: TensorFlow device specification string indicating where this node
should be located. Default value of "" means "use the default device"
"""
Node.__init__(self, g, node_id=node_id, name=name,
op_name=op_name, outputs=[], device=device)
self._attributes = []
self._inputs = []
self._control_inputs = []
def add_attr(self, key: str, value):
"""Add a single attribute to the underlying NodeDef's attr list.
Args:
key: Name of the attribute. Must be unique.
value: Value to put in place for the attribute. Must be one of the
following types:
* tf.DType
* tf.TensorShape
"""
if key in self._attr_names():
raise ValueError("Already have an attribute called '{}'".format(key))
self._attributes.append((key, value))
def get_attr(self, key: str):
# self._attributes is a list of (key, value) pairs
matches = [p[1] for p in self._attributes if p[0] == key]
if 0 == len(matches):
raise ValueError("Node {} does not have an attribute "
"under key '{}'".format(self, key))
elif len(matches) > 1:
raise ValueError("Node {} has more than one attribute "
"under key '{}'".format(self, key))
ret = matches[0]
if isinstance(ret, tf.AttrValue):
return _attr_value_to_python_type(ret)
else:
return ret
def get_attr_keys(self) -> Tuple[str]:
return tuple([p[0] for p in self._attributes])
def clear_attrs(self):
"""
Remove any attributes that are attached to this node.
"""
self._attributes.clear()
def _attr_names(self):
return [a[0] for a in self._attributes]
@Node.inputs.getter
def inputs(self) -> Tuple[tensor.Tensor]:
return tuple(self._inputs)
def set_inputs(self, new_inputs: Iterable[tensor.Tensor]):
"""
Set all inputs at once, removing anything that was there previously.
Args:
new_inputs: Iterable of `Tensor` objects in this node's parent graph
"""
for t in new_inputs:
if t.graph != self.graph:
raise ValueError("Tensor {} points to graph {}, but this node is in a "
"different graph {}".format(t, t.graph, self.graph))
self._inputs = list(new_inputs)
self._graph.increment_version_counter() # New edges added to graph
def set_control_inputs(self, new_control_inputs: Iterable[Node]):
"""
Set all control inputs at once, removing anything that was there
previously.
Args:
new_control_inputs: Iterable of `Node` objects in this node's parent graph
"""
self._control_inputs = list(new_control_inputs)
def set_outputs_from_pairs(self, new_outputs: Iterable[Tuple[tf.DType,
tf.shape]]):
"""
Set all outputs at once, removing anything that was there previously.
Note that information about outputs is not stored in the serialized graph.
When instantiating a serialized graph, TensorFlow will use its own shape
inference to infer the number, type, and shape of the operator's outputs.
Args:
new_outputs: Iterable of (dtype, shape) pairs that describe the outputs
"""
self._outputs = []
i = 0
for (dtype, shape) in new_outputs:
self._outputs.append(tensor.Tensor(self, i, dtype, shape))
i += 1
self._graph.increment_version_counter() # Just in case
def infer_outputs(self):
"""
Use TensorFlow's shape and dtype inference to determine the number of
outputs as well as their shapes and dtypes, based on the node's op type
string, its attribute values, and what inputs are connected to it.
Inference will only function properly if the currently-loaded version of
TensorFlow knows about the specified op type and the current
configuration of this op's inputs is compatible with the combination of
op type string and parameters.
Overwrites the previous value of the `outputs` property.
Raises:
TBD
"""
# TF lack a supported API for invoking shape inference directly,
# so we instantiate a dummy graph and create a dummy Operation object
temp_graph = tf.Graph()
with temp_graph.as_default():
input_placeholders = [tf.placeholder(shape=t.shape, dtype=t.dtype) for
t in self._inputs]
# See the docs for tf.Operation for important notes about the semantics
# of each arg to the following constructor.
dummy_op = tf.Operation(self.to_node_def(), temp_graph,
inputs=input_placeholders)
self.set_outputs_from_pairs([(o.dtype, o.shape)
for o in dummy_op.outputs])
# set_outputs_from_pairs() increments the version counter, so we don't
# need to. Also, we haven't added edges to the graph until these
# outputs are connected to another node's inputs.
def set_inputs_from_strings(self, new_inputs: Iterable[str],
set_control_inputs: bool = True):
"""
Set all input at once, converting TensorFlow string-format inputs into
`Tensor` objects. All nodes referenced in the input strings must be
present in the parent graph.
Args:
new_inputs: Input description strings in the format that they appear in a
`tf.NodeDef` protocol buffer.
set_control_inputs: If True, replace existing control inputs for this
node with any control inputs specified in the input strings.
Otherwise , this method will ignore any strings that describe control
inputs.
"""
self._inputs = _decode_inputs(new_inputs, self._graph)
if set_control_inputs:
self._control_inputs = _decode_control_inputs(new_inputs, self._graph)
self._graph.increment_version_counter() # New edges added to graph
@Node.control_inputs.getter
def control_inputs(self) -> Tuple[Node]:
return tuple(self._control_inputs)
def to_node_def(self):
ret = tf.NodeDef()
ret.name = self.name
ret.op = self.op_name
for input_tensor in self.inputs:
ret.input.append(input_tensor.name)
for control_input_node in self.control_inputs:
ret.input.append("^" + control_input_node.name)
ret.device = self.device
for (attr_name, attr_value) in self._attributes:
# Funky syntax for setting a field of a union in a protobuf
ret.attr[attr_name].CopyFrom(_python_type_to_attr_value(attr_value))
return ret
def set_device(self, device: str):
self._device = device
################################################################################
# Stuff below this line is private to this file.
def _canonicalize_output_name(name: str):
"""
Args:
name: Name for an op output as it would appear in the protocol buffer
representation of a an operator graph
Returns:
A name in the form "<op name>:<output index>"
"""
if ":" in name:
return name
else:
return name + ":0"
def _decode_inputs(inputs: Iterable[str], g: 'graph.Graph') -> List[
tensor.Tensor]:
"""
Extract and decode the inputs in a list of TensorFlow input specification
strings.
Skips over control inputs.
Args:
inputs: List of strings specifying data and/or control inputs,
as serialized in `tf.NodeDef` protocol buffers.
g: Reference to a `Graph` object that must have nodes corresponding
to all inputs in the inputs list.
Returns:
A list of `Tensor` objects corresponding to each of the specified inputs.
"""
# Input names in the protobuf take three forms:
# "^node_name" --> Control input from indicated node
# "node_name" --> Input from output number 0 of indicated node
# "node_name:ix" --> Input from output number <ix> of indicated node
# Start by filtering out the control inputs and turning "node_name" into
# "node_name:0".
input_names = [_canonicalize_output_name(n) for n in inputs
if not n.startswith("^")]
input_tensors = []
for name in input_names:
# Name is in form "node:output number"
node_name, output_ix_name = name.split(":")
output_ix = int(output_ix_name)
input_tensors.append(g[node_name].output(output_ix))
return input_tensors
def _decode_control_inputs(inputs: Iterable[str], g: 'graph.Graph') -> List[
Node]:
"""
Extract and decode the control inputs in a list of TensorFlow input
specification strings.
Skips data inputs.
Args:
inputs: List of strings specifying data and/or control inputs,
as serialized in `tf.NodeDef` protocol buffers.
g: Reference to a `Graph` object that must have nodes corresponding
to all inputs in the inputs list.
Returns:
A list of `Node` objects corresponding to each of the control inputs.
"""
# Control inputs start with "^". Skip everything else and strip off the
# leading caret character
control_input_names = [n[1:] for n in inputs if n.startswith("^")]
return [g[name] for name in control_input_names]
def _python_type_to_attr_value(value: Any) -> tf.AttrValue:
"""
Convert a Python object or scalar value to a TensorFlow `tf.AttrValue`
protocol buffer message.
Args:
value: Python object to be converted
Returns:
An AttrValue object that wraps the contents of `value` in the most
appropriate way available.
"""
# TODO(frreiss): Handle AttrValues that are lists
if isinstance(value, tf.AttrValue):
# TODO(frreiss): Should this case result in an error?
return value
# Scalar types, in the order they appear in the .proto file
elif isinstance(value, str):
return tf.AttrValue(s=tf.compat.as_bytes(value))
elif isinstance(value, int):
return tf.AttrValue(i=value)
elif isinstance(value, float):
return tf.AttrValue(f=value)
elif isinstance(value, bool):
return tf.AttrValue(b=value)
elif isinstance(value, tf.DType):
return tf.AttrValue(type=value.as_datatype_enum())
elif isinstance(value, tf.TensorShape):
return tf.AttrValue(shape=value.as_proto())
elif isinstance(value, np.ndarray):
return tf.AttrValue(tensor=tf.make_tensor_proto(values=value))
# TODO(frreiss): Populate the "func" and "placeholder" fields of the union
# here | "tf.AttrValue".format(type(value)))
def _attr_value_to_python_type(attr_value: tf.AttrValue) -> Any:
"""
Inverse of _python_type_to_attr_value().
Args:
attr_value: Protocol buffer version of a node's attribute value
Returns:
A Python object or built-in type corresponding to the field in
`attr_value` that is in use.
"""
# TODO(frreiss): Handle AttrValues that are lists
if attr_value.HasField("s"): # str
# TODO(frreiss): Should we return the binary value here?
return tf.compat.as_str(attr_value.s)
elif attr_value.HasField("i"): # int
return attr_value.i
elif attr_value.HasField("f"): # float
return attr_value.f
elif attr_value.HasField("b"): # bool
return attr_value.b
elif attr_value.HasField("type"): # DType
return tf.DType(attr_value.type)
elif attr_value.HasField("shape"): # TensorShape
# Undocumented behavior of public API: tf.TensorShape constructor accepts
# a TensorShapeProto.
return tf.TensorShape(attr_value.shape)
elif attr_value.HasField("tensor"): # TensorProto
return tf.make_ndarray(attr_value.tensor)
# TODO(frreiss): Convert the "func" and "placeholder" fields of the union
# here
else:
raise ValueError("Don't know how to convert AttrValue {} to "
"a Python object".format(attr_value)) | else:
raise ValueError("Don't know how to convert a {} to " | random_line_split |
goto_definition.rs | use super::NavigationTarget;
use crate::def::{AstPtr, Expr, Literal, ResolveResult};
use crate::{DefDatabase, FileId, FilePos, ModuleKind, VfsPath};
use nix_interop::FLAKE_FILE;
use syntax::ast::{self, AstNode};
use syntax::{best_token_at_offset, match_ast, SyntaxKind, SyntaxToken};
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum GotoDefinitionResult {
Path(VfsPath),
Targets(Vec<NavigationTarget>),
}
pub(crate) fn goto_definition(
db: &dyn DefDatabase,
FilePos { file_id, pos }: FilePos,
) -> Option<GotoDefinitionResult> {
let parse = db.parse(file_id);
let tok = best_token_at_offset(&parse.syntax_node(), pos)?;
// Special case for goto flake inputs.
if let Some(ret) = goto_flake_input(db, file_id, tok.clone()) {
return Some(ret);
}
let ptr = tok.parent_ancestors().find_map(|node| {
match_ast! {
match node {
ast::Ref(n) => Some(AstPtr::new(n.syntax())),
ast::Name(n) => Some(AstPtr::new(n.syntax())),
ast::Literal(n) => Some(AstPtr::new(n.syntax())),
_ => None,
}
}
})?;
let source_map = db.source_map(file_id);
let expr_id = source_map.expr_for_node(ptr)?;
// Special case for goto-path.
if tok.kind() == SyntaxKind::PATH {
let module = db.module(file_id);
let Expr::Literal(Literal::Path(path)) = &module[expr_id] else {
return None;
};
let path = path.resolve(db)?;
return Some(GotoDefinitionResult::Path(path));
}
let name_res = db.name_resolution(file_id);
let targets = match name_res.get(expr_id)? {
&ResolveResult::Definition(name) => source_map
.nodes_for_name(name)
.filter_map(|ptr| {
let name_node = ptr.to_node(&parse.syntax_node());
let full_node = name_node.ancestors().find(|n| {
matches!(
n.kind(),
SyntaxKind::LAMBDA | SyntaxKind::ATTR_PATH_VALUE | SyntaxKind::INHERIT
)
})?;
Some(NavigationTarget {
file_id,
focus_range: name_node.text_range(),
full_range: full_node.text_range(),
})
})
.collect(),
ResolveResult::WithExprs(withs) => {
withs
.iter()
.filter_map(|&with_expr| {
// with expr; body
// ^--^ focus
// ^--------^ full
let with_node = source_map
.node_for_expr(with_expr)
.expect("WithExprs must be valid")
.to_node(&parse.syntax_node());
let with_node = ast::With::cast(with_node).expect("WithExprs must be valid");
let with_token_range = with_node.with_token()?.text_range();
let with_header_end = with_node
.semicolon_token()
.map_or_else(|| with_node.syntax().text_range(), |tok| tok.text_range());
let with_header = with_token_range.cover(with_header_end);
Some(NavigationTarget {
file_id,
focus_range: with_token_range,
full_range: with_header,
})
})
.collect()
}
// Currently builtin names cannot "goto-definition".
ResolveResult::Builtin(_) => return None,
};
Some(GotoDefinitionResult::Targets(targets))
}
fn goto_flake_input(
db: &dyn DefDatabase,
file: FileId,
tok: SyntaxToken,
) -> Option<GotoDefinitionResult> {
let module_kind = db.module_kind(file);
let ModuleKind::FlakeNix {
explicit_inputs,
param_inputs,
..
} = &*module_kind
else {
return None;
};
let flake_info = db.source_root_flake_info(db.file_source_root(file))?;
let ptr = tok.parent_ancestors().find_map(|node| {
match_ast! {
match node {
ast::Attr(n) => Some(AstPtr::new(n.syntax())),
_ => None,
}
}
})?;
let module = db.module(file);
let source_map = db.source_map(file);
let name_id = source_map.name_for_node(ptr)?;
let name_str = &*module[name_id].text;
if explicit_inputs.get(name_str) == Some(&name_id)
|| param_inputs.get(name_str) == Some(&name_id)
{
let target = flake_info
.input_store_paths
.get(name_str)?
.join(FLAKE_FILE)?;
return Some(GotoDefinitionResult::Path(target));
}
None
}
#[cfg(test)]
mod tests {
use super::*;
use crate::base::SourceDatabase;
use crate::tests::TestDB;
use expect_test::{expect, Expect};
#[track_caller]
fn check_no(fixture: &str) |
#[track_caller]
fn check(fixture: &str, expect: Expect) {
let (db, f) = TestDB::from_fixture(fixture).unwrap();
assert_eq!(f.markers().len(), 1, "Missing markers");
let mut got = match goto_definition(&db, f[0]).expect("No definition") {
GotoDefinitionResult::Path(path) => format!("file://{}", path.display()),
GotoDefinitionResult::Targets(targets) => {
assert!(!targets.is_empty());
targets
.into_iter()
.map(|target| {
assert!(target.full_range.contains_range(target.focus_range));
let src = db.file_content(target.file_id);
let mut full = src[target.full_range].to_owned();
let relative_focus = target.focus_range - target.full_range.start();
full.insert(relative_focus.end().into(), '>');
full.insert(relative_focus.start().into(), '<');
full
})
.collect::<Vec<_>>()
.join("\n")
}
};
// Prettify.
if got.contains('\n') {
got += "\n";
}
expect.assert_eq(&got);
}
#[test]
fn not_found() {
check_no("$0a");
check_no("b: $0a");
}
#[test]
fn invalid_position() {
check_no("1 $0+ 2");
check_no("wi$0th 1; 2");
}
#[test]
fn lambda_param() {
check("a: (a: (a $0a)) 1", expect!["<a>: (a a)"]);
check("x: (a: (a $0x)) 1", expect!["<x>: (a: (a x)) 1"]);
check("a: (a@{ x }: (a $0a)) 1", expect!["<a>@{ x }: (a a)"]);
check("a: ({ x ? $0a }@a: a) 1", expect!["{ x ? a }@<a>: a"]);
check("a: ({ x ? $0x }@a: a) 1", expect!["{ <x> ? x }@a: a"]);
}
#[test]
fn with_env() {
check("with 1; let a = 1; in with 2; $0a", expect!["<a> = 1;"]);
check(
"with 1; let a = 1; in with 2; $0b",
expect![[r#"
<with> 2;
<with> 1;
"#]],
);
}
#[test]
fn bindings() {
check(
"let a = a; in rec { inherit a; b = $0a; }",
expect!["inherit <a>;"],
);
check(
"let a = a; in rec { inherit $0a; b = a; }",
expect!["<a> = a;"],
);
check(
"let a = $0a; in rec { inherit a; b = a; }",
expect!["<a> = a;"],
);
}
#[test]
fn left_and_right() {
check("let a = 1; in $0a ", expect!["<a> = 1;"]);
check("let a = 1; in a$0 ", expect!["<a> = 1;"]);
check("let a = 1; in 0+$0a+0", expect!["<a> = 1;"]);
check("let a = 1; in 0+a$0+0", expect!["<a> = 1;"]);
}
#[test]
fn merged_binding() {
check(
"let a.a = 1; a.b = 2; a = { c = 3; }; in $0a",
expect![[r#"
<a>.a = 1;
<a>.b = 2;
<a> = { c = 3; };
"#]],
);
check(
"rec { b = $0a; a = { a = 1; }; a = { a = 2; }; }",
expect![[r#"
<a> = { a = 1; };
<a> = { a = 2; };
"#]],
);
}
#[test]
fn builtin() {
check("let true = 1; in $0true && false", expect!["<true> = 1;"]);
check_no("let true = 1; in true && $0false");
}
#[test]
fn path() {
check("1 + $0./.", expect!["file:///"]);
check(
"
#- /default.nix
import $0./bar.nix
#- /bar.nix
hello
",
expect!["file:///bar.nix"],
);
}
#[test]
fn flake_input() {
check(
r#"
#- /flake.nix input:nixpkgs=/nix/store/eeee input:nix=/nix/store/oooo
{
description = "Hello flake";
inputs.$0nixpkgs.url = "github:NixOS/nixpkgs";
inputs.nix.url = "github:NixOS/nix";
output = { ... }: { };
}
"#,
expect!["file:///nix/store/eeee/flake.nix"],
);
// Flake input in string form.
check(
r#"
#- /flake.nix input:nixpkgs=/nix/store/eeee input:nix=/nix/store/oooo
{
description = "Hello flake";
inputs = {
nixpkgs = { url = "github:NixOS/nixpkgs"; };
"n$0ix" = { url = "github:NixOS/nix"; };
};
output = { ... }: { };
}
"#,
expect!["file:///nix/store/oooo/flake.nix"],
);
// Not a flake input.
check_no(
r#"
#- /flake.nix input:nixpkgs=/nix/store/eeee
{
description = "Hello flake";
inputs.nixpkgs.url = "github:NixOS/nixpkgs";
inputs'.$0nixpkgs.no = 42;
}
"#,
);
// Not a flake input.
check(
r#"
#- /flake.nix input:nixpkgs=/nix/store/eeee
{
description = "Hello flake";
inputs.nixpkgs.url = "github:NixOS/nixpkgs";
outputs = { nixpkgs, ... }: $0nixpkgs;
"#,
expect!["{ <nixpkgs>, ... }: nixpkgs"],
);
}
#[test]
fn flake_output_pat() {
check(
r#"
#- /flake.nix input:nixpkgs=/nix/store/eeee
{
outputs = { $0nixpkgs, ... }: nixpkgs;
}
"#,
expect!["file:///nix/store/eeee/flake.nix"],
);
// `self` in parameter is no an input.
check_no(
r#"
#- /flake.nix input:self=/nix/store/eeee
{
outputs = { $0self, ... }: self;
}
"#,
);
}
}
| {
let (db, f) = TestDB::from_fixture(fixture).unwrap();
assert_eq!(f.markers().len(), 1, "Missing markers");
assert_eq!(goto_definition(&db, f[0]), None);
} | identifier_body |
goto_definition.rs | use super::NavigationTarget;
use crate::def::{AstPtr, Expr, Literal, ResolveResult};
use crate::{DefDatabase, FileId, FilePos, ModuleKind, VfsPath};
use nix_interop::FLAKE_FILE;
use syntax::ast::{self, AstNode};
use syntax::{best_token_at_offset, match_ast, SyntaxKind, SyntaxToken};
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum GotoDefinitionResult {
Path(VfsPath),
Targets(Vec<NavigationTarget>),
}
pub(crate) fn goto_definition(
db: &dyn DefDatabase,
FilePos { file_id, pos }: FilePos,
) -> Option<GotoDefinitionResult> {
let parse = db.parse(file_id);
let tok = best_token_at_offset(&parse.syntax_node(), pos)?;
// Special case for goto flake inputs.
if let Some(ret) = goto_flake_input(db, file_id, tok.clone()) {
return Some(ret);
}
let ptr = tok.parent_ancestors().find_map(|node| {
match_ast! {
match node {
ast::Ref(n) => Some(AstPtr::new(n.syntax())),
ast::Name(n) => Some(AstPtr::new(n.syntax())),
ast::Literal(n) => Some(AstPtr::new(n.syntax())),
_ => None,
}
}
})?;
let source_map = db.source_map(file_id);
let expr_id = source_map.expr_for_node(ptr)?;
// Special case for goto-path.
if tok.kind() == SyntaxKind::PATH {
let module = db.module(file_id);
let Expr::Literal(Literal::Path(path)) = &module[expr_id] else {
return None;
};
let path = path.resolve(db)?;
return Some(GotoDefinitionResult::Path(path));
}
let name_res = db.name_resolution(file_id);
let targets = match name_res.get(expr_id)? {
&ResolveResult::Definition(name) => source_map
.nodes_for_name(name)
.filter_map(|ptr| {
let name_node = ptr.to_node(&parse.syntax_node());
let full_node = name_node.ancestors().find(|n| {
matches!(
n.kind(),
SyntaxKind::LAMBDA | SyntaxKind::ATTR_PATH_VALUE | SyntaxKind::INHERIT
)
})?;
Some(NavigationTarget {
file_id,
focus_range: name_node.text_range(),
full_range: full_node.text_range(),
})
})
.collect(),
ResolveResult::WithExprs(withs) => |
// Currently builtin names cannot "goto-definition".
ResolveResult::Builtin(_) => return None,
};
Some(GotoDefinitionResult::Targets(targets))
}
fn goto_flake_input(
db: &dyn DefDatabase,
file: FileId,
tok: SyntaxToken,
) -> Option<GotoDefinitionResult> {
let module_kind = db.module_kind(file);
let ModuleKind::FlakeNix {
explicit_inputs,
param_inputs,
..
} = &*module_kind
else {
return None;
};
let flake_info = db.source_root_flake_info(db.file_source_root(file))?;
let ptr = tok.parent_ancestors().find_map(|node| {
match_ast! {
match node {
ast::Attr(n) => Some(AstPtr::new(n.syntax())),
_ => None,
}
}
})?;
let module = db.module(file);
let source_map = db.source_map(file);
let name_id = source_map.name_for_node(ptr)?;
let name_str = &*module[name_id].text;
if explicit_inputs.get(name_str) == Some(&name_id)
|| param_inputs.get(name_str) == Some(&name_id)
{
let target = flake_info
.input_store_paths
.get(name_str)?
.join(FLAKE_FILE)?;
return Some(GotoDefinitionResult::Path(target));
}
None
}
#[cfg(test)]
mod tests {
use super::*;
use crate::base::SourceDatabase;
use crate::tests::TestDB;
use expect_test::{expect, Expect};
#[track_caller]
fn check_no(fixture: &str) {
let (db, f) = TestDB::from_fixture(fixture).unwrap();
assert_eq!(f.markers().len(), 1, "Missing markers");
assert_eq!(goto_definition(&db, f[0]), None);
}
#[track_caller]
fn check(fixture: &str, expect: Expect) {
let (db, f) = TestDB::from_fixture(fixture).unwrap();
assert_eq!(f.markers().len(), 1, "Missing markers");
let mut got = match goto_definition(&db, f[0]).expect("No definition") {
GotoDefinitionResult::Path(path) => format!("file://{}", path.display()),
GotoDefinitionResult::Targets(targets) => {
assert!(!targets.is_empty());
targets
.into_iter()
.map(|target| {
assert!(target.full_range.contains_range(target.focus_range));
let src = db.file_content(target.file_id);
let mut full = src[target.full_range].to_owned();
let relative_focus = target.focus_range - target.full_range.start();
full.insert(relative_focus.end().into(), '>');
full.insert(relative_focus.start().into(), '<');
full
})
.collect::<Vec<_>>()
.join("\n")
}
};
// Prettify.
if got.contains('\n') {
got += "\n";
}
expect.assert_eq(&got);
}
#[test]
fn not_found() {
check_no("$0a");
check_no("b: $0a");
}
#[test]
fn invalid_position() {
check_no("1 $0+ 2");
check_no("wi$0th 1; 2");
}
#[test]
fn lambda_param() {
check("a: (a: (a $0a)) 1", expect!["<a>: (a a)"]);
check("x: (a: (a $0x)) 1", expect!["<x>: (a: (a x)) 1"]);
check("a: (a@{ x }: (a $0a)) 1", expect!["<a>@{ x }: (a a)"]);
check("a: ({ x ? $0a }@a: a) 1", expect!["{ x ? a }@<a>: a"]);
check("a: ({ x ? $0x }@a: a) 1", expect!["{ <x> ? x }@a: a"]);
}
#[test]
fn with_env() {
check("with 1; let a = 1; in with 2; $0a", expect!["<a> = 1;"]);
check(
"with 1; let a = 1; in with 2; $0b",
expect![[r#"
<with> 2;
<with> 1;
"#]],
);
}
#[test]
fn bindings() {
check(
"let a = a; in rec { inherit a; b = $0a; }",
expect!["inherit <a>;"],
);
check(
"let a = a; in rec { inherit $0a; b = a; }",
expect!["<a> = a;"],
);
check(
"let a = $0a; in rec { inherit a; b = a; }",
expect!["<a> = a;"],
);
}
#[test]
fn left_and_right() {
check("let a = 1; in $0a ", expect!["<a> = 1;"]);
check("let a = 1; in a$0 ", expect!["<a> = 1;"]);
check("let a = 1; in 0+$0a+0", expect!["<a> = 1;"]);
check("let a = 1; in 0+a$0+0", expect!["<a> = 1;"]);
}
#[test]
fn merged_binding() {
check(
"let a.a = 1; a.b = 2; a = { c = 3; }; in $0a",
expect![[r#"
<a>.a = 1;
<a>.b = 2;
<a> = { c = 3; };
"#]],
);
check(
"rec { b = $0a; a = { a = 1; }; a = { a = 2; }; }",
expect![[r#"
<a> = { a = 1; };
<a> = { a = 2; };
"#]],
);
}
#[test]
fn builtin() {
check("let true = 1; in $0true && false", expect!["<true> = 1;"]);
check_no("let true = 1; in true && $0false");
}
#[test]
fn path() {
check("1 + $0./.", expect!["file:///"]);
check(
"
#- /default.nix
import $0./bar.nix
#- /bar.nix
hello
",
expect!["file:///bar.nix"],
);
}
#[test]
fn flake_input() {
check(
r#"
#- /flake.nix input:nixpkgs=/nix/store/eeee input:nix=/nix/store/oooo
{
description = "Hello flake";
inputs.$0nixpkgs.url = "github:NixOS/nixpkgs";
inputs.nix.url = "github:NixOS/nix";
output = { ... }: { };
}
"#,
expect!["file:///nix/store/eeee/flake.nix"],
);
// Flake input in string form.
check(
r#"
#- /flake.nix input:nixpkgs=/nix/store/eeee input:nix=/nix/store/oooo
{
description = "Hello flake";
inputs = {
nixpkgs = { url = "github:NixOS/nixpkgs"; };
"n$0ix" = { url = "github:NixOS/nix"; };
};
output = { ... }: { };
}
"#,
expect!["file:///nix/store/oooo/flake.nix"],
);
// Not a flake input.
check_no(
r#"
#- /flake.nix input:nixpkgs=/nix/store/eeee
{
description = "Hello flake";
inputs.nixpkgs.url = "github:NixOS/nixpkgs";
inputs'.$0nixpkgs.no = 42;
}
"#,
);
// Not a flake input.
check(
r#"
#- /flake.nix input:nixpkgs=/nix/store/eeee
{
description = "Hello flake";
inputs.nixpkgs.url = "github:NixOS/nixpkgs";
outputs = { nixpkgs, ... }: $0nixpkgs;
"#,
expect!["{ <nixpkgs>, ... }: nixpkgs"],
);
}
#[test]
fn flake_output_pat() {
check(
r#"
#- /flake.nix input:nixpkgs=/nix/store/eeee
{
outputs = { $0nixpkgs, ... }: nixpkgs;
}
"#,
expect!["file:///nix/store/eeee/flake.nix"],
);
// `self` in parameter is no an input.
check_no(
r#"
#- /flake.nix input:self=/nix/store/eeee
{
outputs = { $0self, ... }: self;
}
"#,
);
}
}
| {
withs
.iter()
.filter_map(|&with_expr| {
// with expr; body
// ^--^ focus
// ^--------^ full
let with_node = source_map
.node_for_expr(with_expr)
.expect("WithExprs must be valid")
.to_node(&parse.syntax_node());
let with_node = ast::With::cast(with_node).expect("WithExprs must be valid");
let with_token_range = with_node.with_token()?.text_range();
let with_header_end = with_node
.semicolon_token()
.map_or_else(|| with_node.syntax().text_range(), |tok| tok.text_range());
let with_header = with_token_range.cover(with_header_end);
Some(NavigationTarget {
file_id,
focus_range: with_token_range,
full_range: with_header,
})
})
.collect()
} | conditional_block |
goto_definition.rs | use super::NavigationTarget;
use crate::def::{AstPtr, Expr, Literal, ResolveResult};
use crate::{DefDatabase, FileId, FilePos, ModuleKind, VfsPath};
use nix_interop::FLAKE_FILE;
use syntax::ast::{self, AstNode};
use syntax::{best_token_at_offset, match_ast, SyntaxKind, SyntaxToken};
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum GotoDefinitionResult {
Path(VfsPath),
Targets(Vec<NavigationTarget>),
}
pub(crate) fn goto_definition(
db: &dyn DefDatabase,
FilePos { file_id, pos }: FilePos,
) -> Option<GotoDefinitionResult> {
let parse = db.parse(file_id);
let tok = best_token_at_offset(&parse.syntax_node(), pos)?;
// Special case for goto flake inputs.
if let Some(ret) = goto_flake_input(db, file_id, tok.clone()) {
return Some(ret);
}
let ptr = tok.parent_ancestors().find_map(|node| {
match_ast! {
match node {
ast::Ref(n) => Some(AstPtr::new(n.syntax())),
ast::Name(n) => Some(AstPtr::new(n.syntax())),
ast::Literal(n) => Some(AstPtr::new(n.syntax())),
_ => None,
}
}
})?;
let source_map = db.source_map(file_id);
let expr_id = source_map.expr_for_node(ptr)?;
// Special case for goto-path.
if tok.kind() == SyntaxKind::PATH {
let module = db.module(file_id);
let Expr::Literal(Literal::Path(path)) = &module[expr_id] else {
return None;
};
let path = path.resolve(db)?;
return Some(GotoDefinitionResult::Path(path));
}
let name_res = db.name_resolution(file_id);
let targets = match name_res.get(expr_id)? {
&ResolveResult::Definition(name) => source_map
.nodes_for_name(name)
.filter_map(|ptr| {
let name_node = ptr.to_node(&parse.syntax_node());
let full_node = name_node.ancestors().find(|n| {
matches!(
n.kind(),
SyntaxKind::LAMBDA | SyntaxKind::ATTR_PATH_VALUE | SyntaxKind::INHERIT
)
})?;
Some(NavigationTarget {
file_id,
focus_range: name_node.text_range(),
full_range: full_node.text_range(),
})
})
.collect(),
ResolveResult::WithExprs(withs) => {
withs
.iter()
.filter_map(|&with_expr| {
// with expr; body
// ^--^ focus
// ^--------^ full
let with_node = source_map
.node_for_expr(with_expr)
.expect("WithExprs must be valid")
.to_node(&parse.syntax_node());
let with_node = ast::With::cast(with_node).expect("WithExprs must be valid");
let with_token_range = with_node.with_token()?.text_range();
let with_header_end = with_node
.semicolon_token()
.map_or_else(|| with_node.syntax().text_range(), |tok| tok.text_range());
let with_header = with_token_range.cover(with_header_end);
Some(NavigationTarget {
file_id,
focus_range: with_token_range,
full_range: with_header,
})
})
.collect()
}
// Currently builtin names cannot "goto-definition".
ResolveResult::Builtin(_) => return None,
};
Some(GotoDefinitionResult::Targets(targets))
}
fn goto_flake_input(
db: &dyn DefDatabase,
file: FileId,
tok: SyntaxToken,
) -> Option<GotoDefinitionResult> {
let module_kind = db.module_kind(file);
let ModuleKind::FlakeNix {
explicit_inputs,
param_inputs,
..
} = &*module_kind
else {
return None;
};
let flake_info = db.source_root_flake_info(db.file_source_root(file))?;
let ptr = tok.parent_ancestors().find_map(|node| {
match_ast! {
match node {
ast::Attr(n) => Some(AstPtr::new(n.syntax())),
_ => None,
}
}
})?;
let module = db.module(file);
let source_map = db.source_map(file);
let name_id = source_map.name_for_node(ptr)?;
let name_str = &*module[name_id].text;
if explicit_inputs.get(name_str) == Some(&name_id)
|| param_inputs.get(name_str) == Some(&name_id)
{
let target = flake_info
.input_store_paths
.get(name_str)?
.join(FLAKE_FILE)?;
return Some(GotoDefinitionResult::Path(target));
}
None
}
#[cfg(test)]
mod tests {
use super::*;
use crate::base::SourceDatabase;
use crate::tests::TestDB;
use expect_test::{expect, Expect};
#[track_caller]
fn check_no(fixture: &str) {
let (db, f) = TestDB::from_fixture(fixture).unwrap();
assert_eq!(f.markers().len(), 1, "Missing markers");
assert_eq!(goto_definition(&db, f[0]), None);
}
#[track_caller]
fn | (fixture: &str, expect: Expect) {
let (db, f) = TestDB::from_fixture(fixture).unwrap();
assert_eq!(f.markers().len(), 1, "Missing markers");
let mut got = match goto_definition(&db, f[0]).expect("No definition") {
GotoDefinitionResult::Path(path) => format!("file://{}", path.display()),
GotoDefinitionResult::Targets(targets) => {
assert!(!targets.is_empty());
targets
.into_iter()
.map(|target| {
assert!(target.full_range.contains_range(target.focus_range));
let src = db.file_content(target.file_id);
let mut full = src[target.full_range].to_owned();
let relative_focus = target.focus_range - target.full_range.start();
full.insert(relative_focus.end().into(), '>');
full.insert(relative_focus.start().into(), '<');
full
})
.collect::<Vec<_>>()
.join("\n")
}
};
// Prettify.
if got.contains('\n') {
got += "\n";
}
expect.assert_eq(&got);
}
#[test]
fn not_found() {
check_no("$0a");
check_no("b: $0a");
}
#[test]
fn invalid_position() {
check_no("1 $0+ 2");
check_no("wi$0th 1; 2");
}
#[test]
fn lambda_param() {
check("a: (a: (a $0a)) 1", expect!["<a>: (a a)"]);
check("x: (a: (a $0x)) 1", expect!["<x>: (a: (a x)) 1"]);
check("a: (a@{ x }: (a $0a)) 1", expect!["<a>@{ x }: (a a)"]);
check("a: ({ x ? $0a }@a: a) 1", expect!["{ x ? a }@<a>: a"]);
check("a: ({ x ? $0x }@a: a) 1", expect!["{ <x> ? x }@a: a"]);
}
#[test]
fn with_env() {
check("with 1; let a = 1; in with 2; $0a", expect!["<a> = 1;"]);
check(
"with 1; let a = 1; in with 2; $0b",
expect![[r#"
<with> 2;
<with> 1;
"#]],
);
}
#[test]
fn bindings() {
check(
"let a = a; in rec { inherit a; b = $0a; }",
expect!["inherit <a>;"],
);
check(
"let a = a; in rec { inherit $0a; b = a; }",
expect!["<a> = a;"],
);
check(
"let a = $0a; in rec { inherit a; b = a; }",
expect!["<a> = a;"],
);
}
#[test]
fn left_and_right() {
check("let a = 1; in $0a ", expect!["<a> = 1;"]);
check("let a = 1; in a$0 ", expect!["<a> = 1;"]);
check("let a = 1; in 0+$0a+0", expect!["<a> = 1;"]);
check("let a = 1; in 0+a$0+0", expect!["<a> = 1;"]);
}
#[test]
fn merged_binding() {
check(
"let a.a = 1; a.b = 2; a = { c = 3; }; in $0a",
expect![[r#"
<a>.a = 1;
<a>.b = 2;
<a> = { c = 3; };
"#]],
);
check(
"rec { b = $0a; a = { a = 1; }; a = { a = 2; }; }",
expect![[r#"
<a> = { a = 1; };
<a> = { a = 2; };
"#]],
);
}
#[test]
fn builtin() {
check("let true = 1; in $0true && false", expect!["<true> = 1;"]);
check_no("let true = 1; in true && $0false");
}
#[test]
fn path() {
check("1 + $0./.", expect!["file:///"]);
check(
"
#- /default.nix
import $0./bar.nix
#- /bar.nix
hello
",
expect!["file:///bar.nix"],
);
}
#[test]
fn flake_input() {
check(
r#"
#- /flake.nix input:nixpkgs=/nix/store/eeee input:nix=/nix/store/oooo
{
description = "Hello flake";
inputs.$0nixpkgs.url = "github:NixOS/nixpkgs";
inputs.nix.url = "github:NixOS/nix";
output = { ... }: { };
}
"#,
expect!["file:///nix/store/eeee/flake.nix"],
);
// Flake input in string form.
check(
r#"
#- /flake.nix input:nixpkgs=/nix/store/eeee input:nix=/nix/store/oooo
{
description = "Hello flake";
inputs = {
nixpkgs = { url = "github:NixOS/nixpkgs"; };
"n$0ix" = { url = "github:NixOS/nix"; };
};
output = { ... }: { };
}
"#,
expect!["file:///nix/store/oooo/flake.nix"],
);
// Not a flake input.
check_no(
r#"
#- /flake.nix input:nixpkgs=/nix/store/eeee
{
description = "Hello flake";
inputs.nixpkgs.url = "github:NixOS/nixpkgs";
inputs'.$0nixpkgs.no = 42;
}
"#,
);
// Not a flake input.
check(
r#"
#- /flake.nix input:nixpkgs=/nix/store/eeee
{
description = "Hello flake";
inputs.nixpkgs.url = "github:NixOS/nixpkgs";
outputs = { nixpkgs, ... }: $0nixpkgs;
"#,
expect!["{ <nixpkgs>, ... }: nixpkgs"],
);
}
#[test]
fn flake_output_pat() {
check(
r#"
#- /flake.nix input:nixpkgs=/nix/store/eeee
{
outputs = { $0nixpkgs, ... }: nixpkgs;
}
"#,
expect!["file:///nix/store/eeee/flake.nix"],
);
// `self` in parameter is no an input.
check_no(
r#"
#- /flake.nix input:self=/nix/store/eeee
{
outputs = { $0self, ... }: self;
}
"#,
);
}
}
| check | identifier_name |
goto_definition.rs | use super::NavigationTarget;
use crate::def::{AstPtr, Expr, Literal, ResolveResult};
use crate::{DefDatabase, FileId, FilePos, ModuleKind, VfsPath};
use nix_interop::FLAKE_FILE;
use syntax::ast::{self, AstNode};
use syntax::{best_token_at_offset, match_ast, SyntaxKind, SyntaxToken};
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum GotoDefinitionResult {
Path(VfsPath),
Targets(Vec<NavigationTarget>),
}
pub(crate) fn goto_definition(
db: &dyn DefDatabase,
FilePos { file_id, pos }: FilePos,
) -> Option<GotoDefinitionResult> {
let parse = db.parse(file_id);
let tok = best_token_at_offset(&parse.syntax_node(), pos)?;
// Special case for goto flake inputs.
if let Some(ret) = goto_flake_input(db, file_id, tok.clone()) {
return Some(ret);
}
let ptr = tok.parent_ancestors().find_map(|node| {
match_ast! {
match node {
ast::Ref(n) => Some(AstPtr::new(n.syntax())),
ast::Name(n) => Some(AstPtr::new(n.syntax())),
ast::Literal(n) => Some(AstPtr::new(n.syntax())),
_ => None,
}
}
})?;
let source_map = db.source_map(file_id);
let expr_id = source_map.expr_for_node(ptr)?;
// Special case for goto-path.
if tok.kind() == SyntaxKind::PATH {
let module = db.module(file_id);
let Expr::Literal(Literal::Path(path)) = &module[expr_id] else {
return None;
};
let path = path.resolve(db)?;
return Some(GotoDefinitionResult::Path(path));
}
let name_res = db.name_resolution(file_id);
let targets = match name_res.get(expr_id)? {
&ResolveResult::Definition(name) => source_map
.nodes_for_name(name)
.filter_map(|ptr| {
let name_node = ptr.to_node(&parse.syntax_node());
let full_node = name_node.ancestors().find(|n| {
matches!(
n.kind(),
SyntaxKind::LAMBDA | SyntaxKind::ATTR_PATH_VALUE | SyntaxKind::INHERIT
)
})?;
Some(NavigationTarget {
file_id,
focus_range: name_node.text_range(),
full_range: full_node.text_range(),
})
})
.collect(),
ResolveResult::WithExprs(withs) => {
withs
.iter()
.filter_map(|&with_expr| {
// with expr; body
// ^--^ focus
// ^--------^ full
let with_node = source_map
.node_for_expr(with_expr)
.expect("WithExprs must be valid")
.to_node(&parse.syntax_node());
let with_node = ast::With::cast(with_node).expect("WithExprs must be valid");
let with_token_range = with_node.with_token()?.text_range();
let with_header_end = with_node
.semicolon_token()
.map_or_else(|| with_node.syntax().text_range(), |tok| tok.text_range());
let with_header = with_token_range.cover(with_header_end);
Some(NavigationTarget {
file_id,
focus_range: with_token_range,
full_range: with_header,
})
})
.collect()
}
// Currently builtin names cannot "goto-definition".
ResolveResult::Builtin(_) => return None,
};
Some(GotoDefinitionResult::Targets(targets))
}
fn goto_flake_input(
db: &dyn DefDatabase,
file: FileId,
tok: SyntaxToken,
) -> Option<GotoDefinitionResult> {
let module_kind = db.module_kind(file);
let ModuleKind::FlakeNix {
explicit_inputs,
param_inputs,
..
} = &*module_kind
else {
return None;
};
let flake_info = db.source_root_flake_info(db.file_source_root(file))?;
let ptr = tok.parent_ancestors().find_map(|node| {
match_ast! {
match node {
ast::Attr(n) => Some(AstPtr::new(n.syntax())),
_ => None,
}
}
})?;
let module = db.module(file);
let source_map = db.source_map(file);
let name_id = source_map.name_for_node(ptr)?;
let name_str = &*module[name_id].text;
if explicit_inputs.get(name_str) == Some(&name_id)
|| param_inputs.get(name_str) == Some(&name_id)
{
let target = flake_info
.input_store_paths
.get(name_str)?
.join(FLAKE_FILE)?;
return Some(GotoDefinitionResult::Path(target));
}
None
}
#[cfg(test)]
mod tests {
use super::*;
use crate::base::SourceDatabase;
use crate::tests::TestDB;
use expect_test::{expect, Expect};
#[track_caller]
fn check_no(fixture: &str) {
let (db, f) = TestDB::from_fixture(fixture).unwrap();
assert_eq!(f.markers().len(), 1, "Missing markers");
assert_eq!(goto_definition(&db, f[0]), None);
}
#[track_caller]
fn check(fixture: &str, expect: Expect) {
let (db, f) = TestDB::from_fixture(fixture).unwrap();
assert_eq!(f.markers().len(), 1, "Missing markers");
let mut got = match goto_definition(&db, f[0]).expect("No definition") {
GotoDefinitionResult::Path(path) => format!("file://{}", path.display()),
GotoDefinitionResult::Targets(targets) => {
assert!(!targets.is_empty());
targets
.into_iter()
.map(|target| {
assert!(target.full_range.contains_range(target.focus_range));
let src = db.file_content(target.file_id);
let mut full = src[target.full_range].to_owned();
let relative_focus = target.focus_range - target.full_range.start();
full.insert(relative_focus.end().into(), '>');
full.insert(relative_focus.start().into(), '<');
full
})
.collect::<Vec<_>>()
.join("\n")
}
};
// Prettify.
if got.contains('\n') {
got += "\n";
}
expect.assert_eq(&got);
}
#[test]
fn not_found() {
check_no("$0a");
check_no("b: $0a");
}
#[test]
fn invalid_position() {
check_no("1 $0+ 2");
check_no("wi$0th 1; 2");
}
#[test]
fn lambda_param() {
check("a: (a: (a $0a)) 1", expect!["<a>: (a a)"]);
check("x: (a: (a $0x)) 1", expect!["<x>: (a: (a x)) 1"]);
check("a: (a@{ x }: (a $0a)) 1", expect!["<a>@{ x }: (a a)"]);
check("a: ({ x ? $0a }@a: a) 1", expect!["{ x ? a }@<a>: a"]);
check("a: ({ x ? $0x }@a: a) 1", expect!["{ <x> ? x }@a: a"]);
}
#[test]
fn with_env() {
check("with 1; let a = 1; in with 2; $0a", expect!["<a> = 1;"]);
check(
"with 1; let a = 1; in with 2; $0b",
expect![[r#"
<with> 2;
<with> 1;
"#]],
);
}
#[test]
fn bindings() {
check(
"let a = a; in rec { inherit a; b = $0a; }", | expect!["<a> = a;"],
);
check(
"let a = $0a; in rec { inherit a; b = a; }",
expect!["<a> = a;"],
);
}
#[test]
fn left_and_right() {
check("let a = 1; in $0a ", expect!["<a> = 1;"]);
check("let a = 1; in a$0 ", expect!["<a> = 1;"]);
check("let a = 1; in 0+$0a+0", expect!["<a> = 1;"]);
check("let a = 1; in 0+a$0+0", expect!["<a> = 1;"]);
}
#[test]
fn merged_binding() {
check(
"let a.a = 1; a.b = 2; a = { c = 3; }; in $0a",
expect![[r#"
<a>.a = 1;
<a>.b = 2;
<a> = { c = 3; };
"#]],
);
check(
"rec { b = $0a; a = { a = 1; }; a = { a = 2; }; }",
expect![[r#"
<a> = { a = 1; };
<a> = { a = 2; };
"#]],
);
}
#[test]
fn builtin() {
check("let true = 1; in $0true && false", expect!["<true> = 1;"]);
check_no("let true = 1; in true && $0false");
}
#[test]
fn path() {
check("1 + $0./.", expect!["file:///"]);
check(
"
#- /default.nix
import $0./bar.nix
#- /bar.nix
hello
",
expect!["file:///bar.nix"],
);
}
#[test]
fn flake_input() {
check(
r#"
#- /flake.nix input:nixpkgs=/nix/store/eeee input:nix=/nix/store/oooo
{
description = "Hello flake";
inputs.$0nixpkgs.url = "github:NixOS/nixpkgs";
inputs.nix.url = "github:NixOS/nix";
output = { ... }: { };
}
"#,
expect!["file:///nix/store/eeee/flake.nix"],
);
// Flake input in string form.
check(
r#"
#- /flake.nix input:nixpkgs=/nix/store/eeee input:nix=/nix/store/oooo
{
description = "Hello flake";
inputs = {
nixpkgs = { url = "github:NixOS/nixpkgs"; };
"n$0ix" = { url = "github:NixOS/nix"; };
};
output = { ... }: { };
}
"#,
expect!["file:///nix/store/oooo/flake.nix"],
);
// Not a flake input.
check_no(
r#"
#- /flake.nix input:nixpkgs=/nix/store/eeee
{
description = "Hello flake";
inputs.nixpkgs.url = "github:NixOS/nixpkgs";
inputs'.$0nixpkgs.no = 42;
}
"#,
);
// Not a flake input.
check(
r#"
#- /flake.nix input:nixpkgs=/nix/store/eeee
{
description = "Hello flake";
inputs.nixpkgs.url = "github:NixOS/nixpkgs";
outputs = { nixpkgs, ... }: $0nixpkgs;
"#,
expect!["{ <nixpkgs>, ... }: nixpkgs"],
);
}
#[test]
fn flake_output_pat() {
check(
r#"
#- /flake.nix input:nixpkgs=/nix/store/eeee
{
outputs = { $0nixpkgs, ... }: nixpkgs;
}
"#,
expect!["file:///nix/store/eeee/flake.nix"],
);
// `self` in parameter is no an input.
check_no(
r#"
#- /flake.nix input:self=/nix/store/eeee
{
outputs = { $0self, ... }: self;
}
"#,
);
}
} | expect!["inherit <a>;"],
);
check(
"let a = a; in rec { inherit $0a; b = a; }", | random_line_split |
sink_test.go | // Copyright 2018 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package changefeedccl
import (
"context"
"math"
"net/url"
"strconv"
"sync"
"testing"
"time"
"github.com/Shopify/sarama"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/changefeedbase"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/mon"
"github.com/cockroachdb/cockroach/pkg/util/randutil"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/require"
)
var zeroTS hlc.Timestamp
type asyncProducerMock struct {
inputCh chan *sarama.ProducerMessage
successesCh chan *sarama.ProducerMessage
errorsCh chan *sarama.ProducerError
mu struct {
syncutil.Mutex
outstanding []*sarama.ProducerMessage
}
}
const unbuffered = 0
func newAsyncProducerMock(bufSize int) *asyncProducerMock {
return &asyncProducerMock{
inputCh: make(chan *sarama.ProducerMessage, bufSize),
successesCh: make(chan *sarama.ProducerMessage, bufSize),
errorsCh: make(chan *sarama.ProducerError, bufSize),
}
}
func (p *asyncProducerMock) Input() chan<- *sarama.ProducerMessage { return p.inputCh }
func (p *asyncProducerMock) Successes() <-chan *sarama.ProducerMessage { return p.successesCh }
func (p *asyncProducerMock) Errors() <-chan *sarama.ProducerError { return p.errorsCh }
func (p *asyncProducerMock) AsyncClose() { panic(`unimplemented`) }
func (p *asyncProducerMock) Close() error {
close(p.inputCh)
close(p.successesCh)
close(p.errorsCh)
return nil
}
// consumeAndSucceed consumes input messages and sends them to successes channel.
// Returns function that must be called to stop this consumer
// to clean up. The cleanup function must be called before closing asyncProducerMock.
func (p *asyncProducerMock) consumeAndSucceed() (cleanup func()) {
var wg sync.WaitGroup
wg.Add(1)
done := make(chan struct{})
go func() {
defer wg.Done()
for {
select {
case <-done:
return
case m := <-p.inputCh:
p.successesCh <- m
}
}
}()
return func() {
close(done)
wg.Wait()
}
}
// consume consumes input messages but does not acknowledge neither successes, nor errors.
// In essence, this simulates an unreachable kafka sink.
// Use acknowledge methods to acknowledge successes or errors.
// Returns a function that must be called to stop this consumer
// to clean up. The cleanup function must be called before closing asyncProducerMock.
func (p *asyncProducerMock) consume() (cleanup func()) {
var wg sync.WaitGroup
wg.Add(1)
done := make(chan struct{})
go func() {
defer wg.Done()
for {
select {
case <-done:
return
case m := <-p.inputCh:
p.mu.Lock()
p.mu.outstanding = append(p.mu.outstanding, m)
p.mu.Unlock()
}
}
}()
return func() {
close(done)
wg.Wait()
}
}
// acknowledge sends acknowledgements on the specified channel
// for each of the outstanding messages.
func (p *asyncProducerMock) acknowledge(n int, ch chan *sarama.ProducerMessage) {
for n > 0 {
var outstanding []*sarama.ProducerMessage
p.mu.Lock()
outstanding = append(outstanding, p.mu.outstanding...)
p.mu.outstanding = p.mu.outstanding[:0]
p.mu.Unlock()
for _, m := range outstanding {
ch <- m
}
n -= len(outstanding)
}
}
// outstanding returns the number of un-acknowledged messages.
func (p *asyncProducerMock) outstanding() int {
p.mu.Lock()
defer p.mu.Unlock()
return len(p.mu.outstanding)
}
func topic(name string) tableDescriptorTopic {
return tableDescriptorTopic{tabledesc.NewBuilder(&descpb.TableDescriptor{Name: name}).BuildImmutableTable()}
}
const memoryUnlimited int64 = math.MaxInt64
const noTopicPrefix = ""
const defaultTopicName = ""
func getBoundAccountWithBudget(budget int64) (account mon.BoundAccount, cleanup func()) {
mm := mon.NewMonitorWithLimit(
"test-mm", mon.MemoryResource, budget,
nil, nil, mon.DefaultPoolAllocationSize, 100,
cluster.MakeTestingClusterSettings())
mm.Start(context.Background(), nil, mon.MakeStandaloneBudget(budget))
return mm.MakeBoundAccount(), func() { mm.Stop(context.Background()) }
}
func makeTestKafkaSink(
t testing.TB,
topicPrefix string,
topicNameOverride string,
p sarama.AsyncProducer,
budget int64,
targetNames ...string,
) (s *kafkaSink, cleanup func()) {
mem, release := getBoundAccountWithBudget(budget)
targets := makeChangefeedTargets(targetNames...)
s = &kafkaSink{
ctx: context.Background(),
topics: makeTopicsMap(topicPrefix, topicNameOverride, targets),
producer: p,
}
s.mu.mem = mem
s.start()
return s, func() {
require.NoError(t, s.Close())
release()
}
}
func makeChangefeedTargets(targetNames ...string) jobspb.ChangefeedTargets {
targets := make(jobspb.ChangefeedTargets, len(targetNames))
for i, name := range targetNames {
targets[descpb.ID(i)] = jobspb.ChangefeedTarget{StatementTimeName: name}
}
return targets
}
func TestKafkaSink(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
p := newAsyncProducerMock(1)
sink, cleanup := makeTestKafkaSink(
t, noTopicPrefix, defaultTopicName, p, memoryUnlimited, "t")
defer cleanup()
// No inflight
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
// Timeout
if err := sink.EmitRow(ctx, topic(`t`), []byte(`1`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m1 := <-p.inputCh
for i := 0; i < 2; i++ {
timeoutCtx, cancel := context.WithTimeout(ctx, time.Millisecond)
defer cancel()
if err := sink.Flush(timeoutCtx); !testutils.IsError(
err, `context deadline exceeded`,
) {
t.Fatalf(`expected "context deadline exceeded" error got: %+v`, err)
}
}
go func() { p.successesCh <- m1 }()
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
// Check no inflight again now that we've sent something
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
// Mixed success and error.
if err := sink.EmitRow(ctx, topic(`t`), []byte(`2`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m2 := <-p.inputCh
if err := sink.EmitRow(ctx, topic(`t`), []byte(`3`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m3 := <-p.inputCh
if err := sink.EmitRow(ctx, topic(`t`), []byte(`4`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m4 := <-p.inputCh
go func() { p.successesCh <- m2 }()
go func() {
p.errorsCh <- &sarama.ProducerError{
Msg: m3,
Err: errors.New("m3"),
}
}()
go func() { p.successesCh <- m4 }()
if err := sink.Flush(ctx); !testutils.IsError(err, `m3`) {
t.Fatalf(`expected "m3" error got: %+v`, err)
}
// Check simple success again after error
if err := sink.EmitRow(ctx, topic(`t`), []byte(`5`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m5 := <-p.inputCh
go func() { p.successesCh <- m5 }()
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
}
func TestKafkaSinkEscaping(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
p := newAsyncProducerMock(1)
sink, cleanup := makeTestKafkaSink(
t, noTopicPrefix, defaultTopicName, p, memoryUnlimited, `☃`)
defer cleanup()
if err := sink.EmitRow(ctx, topic(`☃`), []byte(`k☃`), []byte(`v☃`), zeroTS); err != nil {
t.Fatal(err)
}
m := <-p.inputCh
require.Equal(t, `_u2603_`, m.Topic)
require.Equal(t, sarama.ByteEncoder(`k☃`), m.Key)
require.Equal(t, sarama.ByteEncoder(`v☃`), m.Value)
}
func TestKafkaTopicNameProvided(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
const topicOverride = "general"
p := newAsyncProducerMock(1)
sink, cleanup := makeTestKafkaSink(
t, noTopicPrefix, topicOverride, p, memoryUnlimited, "particular0", "particular1")
defer cleanup()
//all messages go to the general topic
require.NoError(t, sink.EmitRow(ctx, topic("particular0"), []byte(`k☃`), []byte(`v☃`), zeroTS))
m := <-p.inputCh
require.Equal(t, topicOverride, m.Topic)
}
func TestKafkaTopicNameWithPrefix(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
p := newAsyncProducerMock(1)
const topicPrefix = "prefix-"
const topicOverride = "☃"
sink, clenaup := makeTestKafkaSink(
t, topicPrefix, topicOverride, p, memoryUnlimited, "particular0", "particular1")
defer clenaup()
//the prefix is applied and the name is escaped
require.NoError(t, sink.EmitRow(ctx, topic("particular0"), []byte(`k☃`), []byte(`v☃`), zeroTS))
m := <-p.inputCh
require.Equal(t, `prefix-_u2603_`, m.Topic)
}
// goos: darwin
// goarch: amd64
// pkg: github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl
// cpu: Intel(R) Core(TM) i9-9980HK CPU @ 2.40GHz
// BenchmarkEmitRow-16 573620 1779 ns/op 235 B/op 6 allocs/op
func BenchmarkEmitRow(b *testing.B) {
defer leaktest.AfterTest(b)()
defer log.Scope(b).Close(b)
ctx := context.Background()
p := newAsyncProducerMock(unbuffered)
const tableName = `defaultdb.public.funky_table☃`
topic := topic(tableName)
sink, cleanup := makeTestKafkaSink(b, noTopicPrefix, defaultTopicName, p, memoryUnlimited, tableName)
stopConsume := p.consumeAndSucceed()
defer func() {
stopConsume()
cleanup()
}()
b.ResetTimer()
for i := 0; i < b.N; i++ {
require.NoError(b, sink.EmitRow(ctx, topic, []byte(`k☃`), []byte(`v☃`), hlc.Timestamp{}))
}
b.ReportAllocs()
}
type testEncoder struct{}
func (testEncoder) EncodeKey(context.Context, encodeRow) ([]byte, error) { panic(`unimplemented`) }
func (testEncoder) EncodeValue(context.Context, encodeRow) ([]byte, error) { panic(`unimplemented`) }
func (testEncoder) EncodeResolvedTimestamp(
_ context.Context, _ string, ts hlc.Timestamp,
) ([]byte, error) {
return []byte(ts.String()), nil
}
func TestSQLSink(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
overrideTopic := func(name string) tableDescriptorTopic {
id, _ := strconv.ParseUint(name, 36, 64)
return tableDescriptorTopic{
tabledesc.NewBuilder(&descpb.TableDescriptor{Name: name, ID: descpb.ID(id)}).BuildImmutableTable()}
}
ctx := context.Background()
s, sqlDBRaw, _ := serverutils.StartServer(t, base.TestServerArgs{UseDatabase: "d"})
defer s.Stopper().Stop(ctx)
sqlDB := sqlutils.MakeSQLRunner(sqlDBRaw)
sqlDB.Exec(t, `CREATE DATABASE d`)
sinkURL, cleanup := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(security.RootUser))
defer cleanup()
sinkURL.Path = `d`
fooTopic := overrideTopic(`foo`)
barTopic := overrideTopic(`bar`)
targets := jobspb.ChangefeedTargets{
fooTopic.GetID(): jobspb.ChangefeedTarget{StatementTimeName: `foo`},
barTopic.GetID(): jobspb.ChangefeedTarget{StatementTimeName: `bar`},
}
sink, err := makeSQLSink(sinkURL.String(), `sink`, targets)
require.NoError(t, err)
defer func() { require.NoError(t, sink.Close()) }()
// Empty
require.NoError(t, sink.Flush(ctx))
// Undeclared topic
require.EqualError(t,
sink.EmitRow(ctx, overrideTopic(`nope`), nil, nil, zeroTS), `cannot emit to undeclared topic: `)
// With one row, nothing flushes until Flush is called.
require.NoError(t, sink.EmitRow(ctx, fooTopic, []byte(`k1`), []byte(`v0`), zeroTS))
sqlDB.CheckQueryResults(t, `SELECT key, value FROM sink ORDER BY PRIMARY KEY sink`,
[][]string{},
)
require.NoError(t, sink.Flush(ctx))
sqlDB.CheckQueryResults(t, `SELECT key, value FROM sink ORDER BY PRIMARY KEY sink`,
[][]string{{`k1`, `v0`}},
)
sqlDB.Exec(t, `TRUNCATE sink`)
// Verify the implicit flushing
sqlDB.CheckQueryResults(t, `SELECT count(*) FROM sink`, [][]string{{`0`}})
for i := 0; i < sqlSinkRowBatchSize+1; i++ {
require.NoError(t,
sink.EmitRow(ctx, fooTopic, []byte(`k1`), []byte(`v`+strconv.Itoa(i)), zeroTS))
}
// Should have auto flushed after sqlSinkRowBatchSize
sqlDB.CheckQueryResults(t, `SELECT count(*) FROM sink`, [][]string{{`3`}})
require.NoError(t, sink.Flush(ctx))
sqlDB.CheckQueryResults(t, `SELECT count(*) FROM sink`, [][]string{{`4`}})
sqlDB.Exec(t, `TRUNCATE sink`)
// Two tables interleaved in time
require.NoError(t, sink.EmitRow(ctx, fooTopic, []byte(`kfoo`), []byte(`v0`), zeroTS)) | require.NoError(t, sink.Flush(ctx))
sqlDB.CheckQueryResults(t, `SELECT topic, key, value FROM sink ORDER BY PRIMARY KEY sink`,
[][]string{{`bar`, `kbar`, `v0`}, {`foo`, `kfoo`, `v0`}, {`foo`, `kfoo`, `v1`}},
)
sqlDB.Exec(t, `TRUNCATE sink`)
// Multiple keys interleaved in time. Use sqlSinkNumPartitions+1 keys to
// guarantee that at lease two of them end up in the same partition.
for i := 0; i < sqlSinkNumPartitions+1; i++ {
require.NoError(t,
sink.EmitRow(ctx, fooTopic, []byte(`v`+strconv.Itoa(i)), []byte(`v0`), zeroTS))
}
for i := 0; i < sqlSinkNumPartitions+1; i++ {
require.NoError(t,
sink.EmitRow(ctx, fooTopic, []byte(`v`+strconv.Itoa(i)), []byte(`v1`), zeroTS))
}
require.NoError(t, sink.Flush(ctx))
sqlDB.CheckQueryResults(t, `SELECT partition, key, value FROM sink ORDER BY PRIMARY KEY sink`,
[][]string{
{`0`, `v3`, `v0`},
{`0`, `v3`, `v1`},
{`1`, `v1`, `v0`},
{`1`, `v2`, `v0`},
{`1`, `v1`, `v1`},
{`1`, `v2`, `v1`},
{`2`, `v0`, `v0`},
{`2`, `v0`, `v1`},
},
)
sqlDB.Exec(t, `TRUNCATE sink`)
// Emit resolved
var e testEncoder
require.NoError(t, sink.EmitResolvedTimestamp(ctx, e, zeroTS))
require.NoError(t, sink.EmitRow(ctx, fooTopic, []byte(`foo0`), []byte(`v0`), zeroTS))
require.NoError(t, sink.EmitResolvedTimestamp(ctx, e, hlc.Timestamp{WallTime: 1}))
require.NoError(t, sink.Flush(ctx))
sqlDB.CheckQueryResults(t,
`SELECT topic, partition, key, value, resolved FROM sink ORDER BY PRIMARY KEY sink`,
[][]string{
{`bar`, `0`, ``, ``, `0,0`},
{`bar`, `0`, ``, ``, `0.000000001,0`},
{`bar`, `1`, ``, ``, `0,0`},
{`bar`, `1`, ``, ``, `0.000000001,0`},
{`bar`, `2`, ``, ``, `0,0`},
{`bar`, `2`, ``, ``, `0.000000001,0`},
{`foo`, `0`, ``, ``, `0,0`},
{`foo`, `0`, `foo0`, `v0`, ``},
{`foo`, `0`, ``, ``, `0.000000001,0`},
{`foo`, `1`, ``, ``, `0,0`},
{`foo`, `1`, ``, ``, `0.000000001,0`},
{`foo`, `2`, ``, ``, `0,0`},
{`foo`, `2`, ``, ``, `0.000000001,0`},
},
)
}
func TestSaramaConfigOptionParsing(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
opts := make(map[string]string)
cfg, err := getSaramaConfig(opts)
require.NoError(t, err)
require.Equal(t, defaultSaramaConfig, cfg)
expected := &saramaConfig{}
expected.Flush.MaxMessages = 1000
expected.Flush.Frequency = jsonDuration(time.Second)
opts[changefeedbase.OptKafkaSinkConfig] = `{"Flush": {"MaxMessages": 1000, "Frequency": "1s"}}`
cfg, err = getSaramaConfig(opts)
require.NoError(t, err)
require.Equal(t, expected, cfg)
}
func TestKafkaSinkTracksMemory(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
memCapacity := mon.DefaultPoolAllocationSize
// Use fake kafka sink which "consumes" all messages on its input channel,
// but does not acknowledge them automatically (i.e. slow sink)
p := newAsyncProducerMock(unbuffered)
stopConsume := p.consume()
sink, cleanup := makeTestKafkaSink(
t, noTopicPrefix, defaultTopicName, p, memCapacity, "t")
defer func() {
stopConsume()
cleanup()
}()
// No inflight
require.NoError(t, sink.Flush(ctx))
// Emit few messages
rnd, _ := randutil.NewTestPseudoRand()
key := randutil.RandBytes(rnd, 1+rnd.Intn(64))
val := randutil.RandBytes(rnd, 1+rnd.Intn(512))
kvLen := int64(len(key)) + int64(len(val))
testTopic := topic(`t`)
for i := 0; i < 10; i++ {
require.NoError(t, sink.EmitRow(ctx, testTopic, key, val, zeroTS))
}
memUsed := func() int64 {
sink.mu.Lock()
defer sink.mu.Unlock()
return sink.mu.mem.Used()
}
require.Equal(t, 10*kvLen, memUsed())
// Acknowledge outstanding messages, and flush.
p.acknowledge(10, p.successesCh)
require.NoError(t, sink.Flush(ctx))
require.EqualValues(t, 0, p.outstanding())
// Try emitting resolved timestamp. This message type is different from the
// regular messages since it doesn't have Key set.
// We bypass majority of EmitResolvedTimestamp logic since we don't have
// a real kafka client instantiated. Instead, we call emitMessage directly.
reg := makeTestSchemaRegistry()
defer reg.Close()
opts := map[string]string{
changefeedbase.OptEnvelope: string(changefeedbase.OptEnvelopeWrapped),
changefeedbase.OptConfluentSchemaRegistry: reg.server.URL,
}
encoder, err := newConfluentAvroEncoder(opts, makeChangefeedTargets("t"))
require.NoError(t, err)
payload, err := encoder.EncodeResolvedTimestamp(ctx, "t", hlc.Timestamp{})
require.NoError(t, err)
msg := &sarama.ProducerMessage{
Topic: "t",
Key: nil,
Value: sarama.ByteEncoder(payload),
}
require.NoError(t, sink.emitMessage(ctx, msg))
p.acknowledge(1, p.successesCh)
require.NoError(t, sink.Flush(ctx))
require.EqualValues(t, 0, p.outstanding())
// Try to emit more than we can handle.
expectOverflow := memCapacity / kvLen
for err == nil {
err = sink.EmitRow(ctx, testTopic, key, val, zeroTS)
}
require.Regexp(t, `memory budget exceeded`, err)
// We failed to allocate more memory, but we should have used
// memory for the expectOverflow key/values.
require.EqualValues(t, expectOverflow*kvLen, memUsed())
} | require.NoError(t, sink.EmitRow(ctx, barTopic, []byte(`kbar`), []byte(`v0`), zeroTS))
require.NoError(t, sink.EmitRow(ctx, fooTopic, []byte(`kfoo`), []byte(`v1`), zeroTS)) | random_line_split |
sink_test.go | // Copyright 2018 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package changefeedccl
import (
"context"
"math"
"net/url"
"strconv"
"sync"
"testing"
"time"
"github.com/Shopify/sarama"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/changefeedbase"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/mon"
"github.com/cockroachdb/cockroach/pkg/util/randutil"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/require"
)
var zeroTS hlc.Timestamp
type asyncProducerMock struct {
inputCh chan *sarama.ProducerMessage
successesCh chan *sarama.ProducerMessage
errorsCh chan *sarama.ProducerError
mu struct {
syncutil.Mutex
outstanding []*sarama.ProducerMessage
}
}
const unbuffered = 0
func newAsyncProducerMock(bufSize int) *asyncProducerMock {
return &asyncProducerMock{
inputCh: make(chan *sarama.ProducerMessage, bufSize),
successesCh: make(chan *sarama.ProducerMessage, bufSize),
errorsCh: make(chan *sarama.ProducerError, bufSize),
}
}
func (p *asyncProducerMock) Input() chan<- *sarama.ProducerMessage { return p.inputCh }
func (p *asyncProducerMock) Successes() <-chan *sarama.ProducerMessage { return p.successesCh }
func (p *asyncProducerMock) Errors() <-chan *sarama.ProducerError { return p.errorsCh }
func (p *asyncProducerMock) AsyncClose() |
func (p *asyncProducerMock) Close() error {
close(p.inputCh)
close(p.successesCh)
close(p.errorsCh)
return nil
}
// consumeAndSucceed consumes input messages and sends them to successes channel.
// Returns function that must be called to stop this consumer
// to clean up. The cleanup function must be called before closing asyncProducerMock.
func (p *asyncProducerMock) consumeAndSucceed() (cleanup func()) {
var wg sync.WaitGroup
wg.Add(1)
done := make(chan struct{})
go func() {
defer wg.Done()
for {
select {
case <-done:
return
case m := <-p.inputCh:
p.successesCh <- m
}
}
}()
return func() {
close(done)
wg.Wait()
}
}
// consume consumes input messages but does not acknowledge neither successes, nor errors.
// In essence, this simulates an unreachable kafka sink.
// Use acknowledge methods to acknowledge successes or errors.
// Returns a function that must be called to stop this consumer
// to clean up. The cleanup function must be called before closing asyncProducerMock.
func (p *asyncProducerMock) consume() (cleanup func()) {
var wg sync.WaitGroup
wg.Add(1)
done := make(chan struct{})
go func() {
defer wg.Done()
for {
select {
case <-done:
return
case m := <-p.inputCh:
p.mu.Lock()
p.mu.outstanding = append(p.mu.outstanding, m)
p.mu.Unlock()
}
}
}()
return func() {
close(done)
wg.Wait()
}
}
// acknowledge sends acknowledgements on the specified channel
// for each of the outstanding messages.
func (p *asyncProducerMock) acknowledge(n int, ch chan *sarama.ProducerMessage) {
for n > 0 {
var outstanding []*sarama.ProducerMessage
p.mu.Lock()
outstanding = append(outstanding, p.mu.outstanding...)
p.mu.outstanding = p.mu.outstanding[:0]
p.mu.Unlock()
for _, m := range outstanding {
ch <- m
}
n -= len(outstanding)
}
}
// outstanding returns the number of un-acknowledged messages.
func (p *asyncProducerMock) outstanding() int {
p.mu.Lock()
defer p.mu.Unlock()
return len(p.mu.outstanding)
}
func topic(name string) tableDescriptorTopic {
return tableDescriptorTopic{tabledesc.NewBuilder(&descpb.TableDescriptor{Name: name}).BuildImmutableTable()}
}
const memoryUnlimited int64 = math.MaxInt64
const noTopicPrefix = ""
const defaultTopicName = ""
func getBoundAccountWithBudget(budget int64) (account mon.BoundAccount, cleanup func()) {
mm := mon.NewMonitorWithLimit(
"test-mm", mon.MemoryResource, budget,
nil, nil, mon.DefaultPoolAllocationSize, 100,
cluster.MakeTestingClusterSettings())
mm.Start(context.Background(), nil, mon.MakeStandaloneBudget(budget))
return mm.MakeBoundAccount(), func() { mm.Stop(context.Background()) }
}
func makeTestKafkaSink(
t testing.TB,
topicPrefix string,
topicNameOverride string,
p sarama.AsyncProducer,
budget int64,
targetNames ...string,
) (s *kafkaSink, cleanup func()) {
mem, release := getBoundAccountWithBudget(budget)
targets := makeChangefeedTargets(targetNames...)
s = &kafkaSink{
ctx: context.Background(),
topics: makeTopicsMap(topicPrefix, topicNameOverride, targets),
producer: p,
}
s.mu.mem = mem
s.start()
return s, func() {
require.NoError(t, s.Close())
release()
}
}
func makeChangefeedTargets(targetNames ...string) jobspb.ChangefeedTargets {
targets := make(jobspb.ChangefeedTargets, len(targetNames))
for i, name := range targetNames {
targets[descpb.ID(i)] = jobspb.ChangefeedTarget{StatementTimeName: name}
}
return targets
}
func TestKafkaSink(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
p := newAsyncProducerMock(1)
sink, cleanup := makeTestKafkaSink(
t, noTopicPrefix, defaultTopicName, p, memoryUnlimited, "t")
defer cleanup()
// No inflight
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
// Timeout
if err := sink.EmitRow(ctx, topic(`t`), []byte(`1`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m1 := <-p.inputCh
for i := 0; i < 2; i++ {
timeoutCtx, cancel := context.WithTimeout(ctx, time.Millisecond)
defer cancel()
if err := sink.Flush(timeoutCtx); !testutils.IsError(
err, `context deadline exceeded`,
) {
t.Fatalf(`expected "context deadline exceeded" error got: %+v`, err)
}
}
go func() { p.successesCh <- m1 }()
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
// Check no inflight again now that we've sent something
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
// Mixed success and error.
if err := sink.EmitRow(ctx, topic(`t`), []byte(`2`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m2 := <-p.inputCh
if err := sink.EmitRow(ctx, topic(`t`), []byte(`3`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m3 := <-p.inputCh
if err := sink.EmitRow(ctx, topic(`t`), []byte(`4`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m4 := <-p.inputCh
go func() { p.successesCh <- m2 }()
go func() {
p.errorsCh <- &sarama.ProducerError{
Msg: m3,
Err: errors.New("m3"),
}
}()
go func() { p.successesCh <- m4 }()
if err := sink.Flush(ctx); !testutils.IsError(err, `m3`) {
t.Fatalf(`expected "m3" error got: %+v`, err)
}
// Check simple success again after error
if err := sink.EmitRow(ctx, topic(`t`), []byte(`5`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m5 := <-p.inputCh
go func() { p.successesCh <- m5 }()
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
}
func TestKafkaSinkEscaping(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
p := newAsyncProducerMock(1)
sink, cleanup := makeTestKafkaSink(
t, noTopicPrefix, defaultTopicName, p, memoryUnlimited, `☃`)
defer cleanup()
if err := sink.EmitRow(ctx, topic(`☃`), []byte(`k☃`), []byte(`v☃`), zeroTS); err != nil {
t.Fatal(err)
}
m := <-p.inputCh
require.Equal(t, `_u2603_`, m.Topic)
require.Equal(t, sarama.ByteEncoder(`k☃`), m.Key)
require.Equal(t, sarama.ByteEncoder(`v☃`), m.Value)
}
func TestKafkaTopicNameProvided(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
const topicOverride = "general"
p := newAsyncProducerMock(1)
sink, cleanup := makeTestKafkaSink(
t, noTopicPrefix, topicOverride, p, memoryUnlimited, "particular0", "particular1")
defer cleanup()
//all messages go to the general topic
require.NoError(t, sink.EmitRow(ctx, topic("particular0"), []byte(`k☃`), []byte(`v☃`), zeroTS))
m := <-p.inputCh
require.Equal(t, topicOverride, m.Topic)
}
func TestKafkaTopicNameWithPrefix(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
p := newAsyncProducerMock(1)
const topicPrefix = "prefix-"
const topicOverride = "☃"
sink, clenaup := makeTestKafkaSink(
t, topicPrefix, topicOverride, p, memoryUnlimited, "particular0", "particular1")
defer clenaup()
//the prefix is applied and the name is escaped
require.NoError(t, sink.EmitRow(ctx, topic("particular0"), []byte(`k☃`), []byte(`v☃`), zeroTS))
m := <-p.inputCh
require.Equal(t, `prefix-_u2603_`, m.Topic)
}
// goos: darwin
// goarch: amd64
// pkg: github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl
// cpu: Intel(R) Core(TM) i9-9980HK CPU @ 2.40GHz
// BenchmarkEmitRow-16 573620 1779 ns/op 235 B/op 6 allocs/op
func BenchmarkEmitRow(b *testing.B) {
defer leaktest.AfterTest(b)()
defer log.Scope(b).Close(b)
ctx := context.Background()
p := newAsyncProducerMock(unbuffered)
const tableName = `defaultdb.public.funky_table☃`
topic := topic(tableName)
sink, cleanup := makeTestKafkaSink(b, noTopicPrefix, defaultTopicName, p, memoryUnlimited, tableName)
stopConsume := p.consumeAndSucceed()
defer func() {
stopConsume()
cleanup()
}()
b.ResetTimer()
for i := 0; i < b.N; i++ {
require.NoError(b, sink.EmitRow(ctx, topic, []byte(`k☃`), []byte(`v☃`), hlc.Timestamp{}))
}
b.ReportAllocs()
}
type testEncoder struct{}
func (testEncoder) EncodeKey(context.Context, encodeRow) ([]byte, error) { panic(`unimplemented`) }
func (testEncoder) EncodeValue(context.Context, encodeRow) ([]byte, error) { panic(`unimplemented`) }
func (testEncoder) EncodeResolvedTimestamp(
_ context.Context, _ string, ts hlc.Timestamp,
) ([]byte, error) {
return []byte(ts.String()), nil
}
func TestSQLSink(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
overrideTopic := func(name string) tableDescriptorTopic {
id, _ := strconv.ParseUint(name, 36, 64)
return tableDescriptorTopic{
tabledesc.NewBuilder(&descpb.TableDescriptor{Name: name, ID: descpb.ID(id)}).BuildImmutableTable()}
}
ctx := context.Background()
s, sqlDBRaw, _ := serverutils.StartServer(t, base.TestServerArgs{UseDatabase: "d"})
defer s.Stopper().Stop(ctx)
sqlDB := sqlutils.MakeSQLRunner(sqlDBRaw)
sqlDB.Exec(t, `CREATE DATABASE d`)
sinkURL, cleanup := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(security.RootUser))
defer cleanup()
sinkURL.Path = `d`
fooTopic := overrideTopic(`foo`)
barTopic := overrideTopic(`bar`)
targets := jobspb.ChangefeedTargets{
fooTopic.GetID(): jobspb.ChangefeedTarget{StatementTimeName: `foo`},
barTopic.GetID(): jobspb.ChangefeedTarget{StatementTimeName: `bar`},
}
sink, err := makeSQLSink(sinkURL.String(), `sink`, targets)
require.NoError(t, err)
defer func() { require.NoError(t, sink.Close()) }()
// Empty
require.NoError(t, sink.Flush(ctx))
// Undeclared topic
require.EqualError(t,
sink.EmitRow(ctx, overrideTopic(`nope`), nil, nil, zeroTS), `cannot emit to undeclared topic: `)
// With one row, nothing flushes until Flush is called.
require.NoError(t, sink.EmitRow(ctx, fooTopic, []byte(`k1`), []byte(`v0`), zeroTS))
sqlDB.CheckQueryResults(t, `SELECT key, value FROM sink ORDER BY PRIMARY KEY sink`,
[][]string{},
)
require.NoError(t, sink.Flush(ctx))
sqlDB.CheckQueryResults(t, `SELECT key, value FROM sink ORDER BY PRIMARY KEY sink`,
[][]string{{`k1`, `v0`}},
)
sqlDB.Exec(t, `TRUNCATE sink`)
// Verify the implicit flushing
sqlDB.CheckQueryResults(t, `SELECT count(*) FROM sink`, [][]string{{`0`}})
for i := 0; i < sqlSinkRowBatchSize+1; i++ {
require.NoError(t,
sink.EmitRow(ctx, fooTopic, []byte(`k1`), []byte(`v`+strconv.Itoa(i)), zeroTS))
}
// Should have auto flushed after sqlSinkRowBatchSize
sqlDB.CheckQueryResults(t, `SELECT count(*) FROM sink`, [][]string{{`3`}})
require.NoError(t, sink.Flush(ctx))
sqlDB.CheckQueryResults(t, `SELECT count(*) FROM sink`, [][]string{{`4`}})
sqlDB.Exec(t, `TRUNCATE sink`)
// Two tables interleaved in time
require.NoError(t, sink.EmitRow(ctx, fooTopic, []byte(`kfoo`), []byte(`v0`), zeroTS))
require.NoError(t, sink.EmitRow(ctx, barTopic, []byte(`kbar`), []byte(`v0`), zeroTS))
require.NoError(t, sink.EmitRow(ctx, fooTopic, []byte(`kfoo`), []byte(`v1`), zeroTS))
require.NoError(t, sink.Flush(ctx))
sqlDB.CheckQueryResults(t, `SELECT topic, key, value FROM sink ORDER BY PRIMARY KEY sink`,
[][]string{{`bar`, `kbar`, `v0`}, {`foo`, `kfoo`, `v0`}, {`foo`, `kfoo`, `v1`}},
)
sqlDB.Exec(t, `TRUNCATE sink`)
// Multiple keys interleaved in time. Use sqlSinkNumPartitions+1 keys to
// guarantee that at lease two of them end up in the same partition.
for i := 0; i < sqlSinkNumPartitions+1; i++ {
require.NoError(t,
sink.EmitRow(ctx, fooTopic, []byte(`v`+strconv.Itoa(i)), []byte(`v0`), zeroTS))
}
for i := 0; i < sqlSinkNumPartitions+1; i++ {
require.NoError(t,
sink.EmitRow(ctx, fooTopic, []byte(`v`+strconv.Itoa(i)), []byte(`v1`), zeroTS))
}
require.NoError(t, sink.Flush(ctx))
sqlDB.CheckQueryResults(t, `SELECT partition, key, value FROM sink ORDER BY PRIMARY KEY sink`,
[][]string{
{`0`, `v3`, `v0`},
{`0`, `v3`, `v1`},
{`1`, `v1`, `v0`},
{`1`, `v2`, `v0`},
{`1`, `v1`, `v1`},
{`1`, `v2`, `v1`},
{`2`, `v0`, `v0`},
{`2`, `v0`, `v1`},
},
)
sqlDB.Exec(t, `TRUNCATE sink`)
// Emit resolved
var e testEncoder
require.NoError(t, sink.EmitResolvedTimestamp(ctx, e, zeroTS))
require.NoError(t, sink.EmitRow(ctx, fooTopic, []byte(`foo0`), []byte(`v0`), zeroTS))
require.NoError(t, sink.EmitResolvedTimestamp(ctx, e, hlc.Timestamp{WallTime: 1}))
require.NoError(t, sink.Flush(ctx))
sqlDB.CheckQueryResults(t,
`SELECT topic, partition, key, value, resolved FROM sink ORDER BY PRIMARY KEY sink`,
[][]string{
{`bar`, `0`, ``, ``, `0,0`},
{`bar`, `0`, ``, ``, `0.000000001,0`},
{`bar`, `1`, ``, ``, `0,0`},
{`bar`, `1`, ``, ``, `0.000000001,0`},
{`bar`, `2`, ``, ``, `0,0`},
{`bar`, `2`, ``, ``, `0.000000001,0`},
{`foo`, `0`, ``, ``, `0,0`},
{`foo`, `0`, `foo0`, `v0`, ``},
{`foo`, `0`, ``, ``, `0.000000001,0`},
{`foo`, `1`, ``, ``, `0,0`},
{`foo`, `1`, ``, ``, `0.000000001,0`},
{`foo`, `2`, ``, ``, `0,0`},
{`foo`, `2`, ``, ``, `0.000000001,0`},
},
)
}
func TestSaramaConfigOptionParsing(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
opts := make(map[string]string)
cfg, err := getSaramaConfig(opts)
require.NoError(t, err)
require.Equal(t, defaultSaramaConfig, cfg)
expected := &saramaConfig{}
expected.Flush.MaxMessages = 1000
expected.Flush.Frequency = jsonDuration(time.Second)
opts[changefeedbase.OptKafkaSinkConfig] = `{"Flush": {"MaxMessages": 1000, "Frequency": "1s"}}`
cfg, err = getSaramaConfig(opts)
require.NoError(t, err)
require.Equal(t, expected, cfg)
}
func TestKafkaSinkTracksMemory(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
memCapacity := mon.DefaultPoolAllocationSize
// Use fake kafka sink which "consumes" all messages on its input channel,
// but does not acknowledge them automatically (i.e. slow sink)
p := newAsyncProducerMock(unbuffered)
stopConsume := p.consume()
sink, cleanup := makeTestKafkaSink(
t, noTopicPrefix, defaultTopicName, p, memCapacity, "t")
defer func() {
stopConsume()
cleanup()
}()
// No inflight
require.NoError(t, sink.Flush(ctx))
// Emit few messages
rnd, _ := randutil.NewTestPseudoRand()
key := randutil.RandBytes(rnd, 1+rnd.Intn(64))
val := randutil.RandBytes(rnd, 1+rnd.Intn(512))
kvLen := int64(len(key)) + int64(len(val))
testTopic := topic(`t`)
for i := 0; i < 10; i++ {
require.NoError(t, sink.EmitRow(ctx, testTopic, key, val, zeroTS))
}
memUsed := func() int64 {
sink.mu.Lock()
defer sink.mu.Unlock()
return sink.mu.mem.Used()
}
require.Equal(t, 10*kvLen, memUsed())
// Acknowledge outstanding messages, and flush.
p.acknowledge(10, p.successesCh)
require.NoError(t, sink.Flush(ctx))
require.EqualValues(t, 0, p.outstanding())
// Try emitting resolved timestamp. This message type is different from the
// regular messages since it doesn't have Key set.
// We bypass majority of EmitResolvedTimestamp logic since we don't have
// a real kafka client instantiated. Instead, we call emitMessage directly.
reg := makeTestSchemaRegistry()
defer reg.Close()
opts := map[string]string{
changefeedbase.OptEnvelope: string(changefeedbase.OptEnvelopeWrapped),
changefeedbase.OptConfluentSchemaRegistry: reg.server.URL,
}
encoder, err := newConfluentAvroEncoder(opts, makeChangefeedTargets("t"))
require.NoError(t, err)
payload, err := encoder.EncodeResolvedTimestamp(ctx, "t", hlc.Timestamp{})
require.NoError(t, err)
msg := &sarama.ProducerMessage{
Topic: "t",
Key: nil,
Value: sarama.ByteEncoder(payload),
}
require.NoError(t, sink.emitMessage(ctx, msg))
p.acknowledge(1, p.successesCh)
require.NoError(t, sink.Flush(ctx))
require.EqualValues(t, 0, p.outstanding())
// Try to emit more than we can handle.
expectOverflow := memCapacity / kvLen
for err == nil {
err = sink.EmitRow(ctx, testTopic, key, val, zeroTS)
}
require.Regexp(t, `memory budget exceeded`, err)
// We failed to allocate more memory, but we should have used
// memory for the expectOverflow key/values.
require.EqualValues(t, expectOverflow*kvLen, memUsed())
}
| { panic(`unimplemented`) } | identifier_body |
sink_test.go | // Copyright 2018 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package changefeedccl
import (
"context"
"math"
"net/url"
"strconv"
"sync"
"testing"
"time"
"github.com/Shopify/sarama"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/changefeedbase"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/mon"
"github.com/cockroachdb/cockroach/pkg/util/randutil"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/require"
)
var zeroTS hlc.Timestamp
type asyncProducerMock struct {
inputCh chan *sarama.ProducerMessage
successesCh chan *sarama.ProducerMessage
errorsCh chan *sarama.ProducerError
mu struct {
syncutil.Mutex
outstanding []*sarama.ProducerMessage
}
}
const unbuffered = 0
func newAsyncProducerMock(bufSize int) *asyncProducerMock {
return &asyncProducerMock{
inputCh: make(chan *sarama.ProducerMessage, bufSize),
successesCh: make(chan *sarama.ProducerMessage, bufSize),
errorsCh: make(chan *sarama.ProducerError, bufSize),
}
}
func (p *asyncProducerMock) Input() chan<- *sarama.ProducerMessage { return p.inputCh }
func (p *asyncProducerMock) Successes() <-chan *sarama.ProducerMessage { return p.successesCh }
func (p *asyncProducerMock) Errors() <-chan *sarama.ProducerError { return p.errorsCh }
func (p *asyncProducerMock) AsyncClose() { panic(`unimplemented`) }
func (p *asyncProducerMock) Close() error {
close(p.inputCh)
close(p.successesCh)
close(p.errorsCh)
return nil
}
// consumeAndSucceed consumes input messages and sends them to successes channel.
// Returns function that must be called to stop this consumer
// to clean up. The cleanup function must be called before closing asyncProducerMock.
func (p *asyncProducerMock) consumeAndSucceed() (cleanup func()) {
var wg sync.WaitGroup
wg.Add(1)
done := make(chan struct{})
go func() {
defer wg.Done()
for {
select {
case <-done:
return
case m := <-p.inputCh:
p.successesCh <- m
}
}
}()
return func() {
close(done)
wg.Wait()
}
}
// consume consumes input messages but does not acknowledge neither successes, nor errors.
// In essence, this simulates an unreachable kafka sink.
// Use acknowledge methods to acknowledge successes or errors.
// Returns a function that must be called to stop this consumer
// to clean up. The cleanup function must be called before closing asyncProducerMock.
func (p *asyncProducerMock) consume() (cleanup func()) {
var wg sync.WaitGroup
wg.Add(1)
done := make(chan struct{})
go func() {
defer wg.Done()
for {
select {
case <-done:
return
case m := <-p.inputCh:
p.mu.Lock()
p.mu.outstanding = append(p.mu.outstanding, m)
p.mu.Unlock()
}
}
}()
return func() {
close(done)
wg.Wait()
}
}
// acknowledge sends acknowledgements on the specified channel
// for each of the outstanding messages.
func (p *asyncProducerMock) acknowledge(n int, ch chan *sarama.ProducerMessage) {
for n > 0 {
var outstanding []*sarama.ProducerMessage
p.mu.Lock()
outstanding = append(outstanding, p.mu.outstanding...)
p.mu.outstanding = p.mu.outstanding[:0]
p.mu.Unlock()
for _, m := range outstanding |
n -= len(outstanding)
}
}
// outstanding returns the number of un-acknowledged messages.
func (p *asyncProducerMock) outstanding() int {
p.mu.Lock()
defer p.mu.Unlock()
return len(p.mu.outstanding)
}
func topic(name string) tableDescriptorTopic {
return tableDescriptorTopic{tabledesc.NewBuilder(&descpb.TableDescriptor{Name: name}).BuildImmutableTable()}
}
const memoryUnlimited int64 = math.MaxInt64
const noTopicPrefix = ""
const defaultTopicName = ""
func getBoundAccountWithBudget(budget int64) (account mon.BoundAccount, cleanup func()) {
mm := mon.NewMonitorWithLimit(
"test-mm", mon.MemoryResource, budget,
nil, nil, mon.DefaultPoolAllocationSize, 100,
cluster.MakeTestingClusterSettings())
mm.Start(context.Background(), nil, mon.MakeStandaloneBudget(budget))
return mm.MakeBoundAccount(), func() { mm.Stop(context.Background()) }
}
func makeTestKafkaSink(
t testing.TB,
topicPrefix string,
topicNameOverride string,
p sarama.AsyncProducer,
budget int64,
targetNames ...string,
) (s *kafkaSink, cleanup func()) {
mem, release := getBoundAccountWithBudget(budget)
targets := makeChangefeedTargets(targetNames...)
s = &kafkaSink{
ctx: context.Background(),
topics: makeTopicsMap(topicPrefix, topicNameOverride, targets),
producer: p,
}
s.mu.mem = mem
s.start()
return s, func() {
require.NoError(t, s.Close())
release()
}
}
func makeChangefeedTargets(targetNames ...string) jobspb.ChangefeedTargets {
targets := make(jobspb.ChangefeedTargets, len(targetNames))
for i, name := range targetNames {
targets[descpb.ID(i)] = jobspb.ChangefeedTarget{StatementTimeName: name}
}
return targets
}
func TestKafkaSink(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
p := newAsyncProducerMock(1)
sink, cleanup := makeTestKafkaSink(
t, noTopicPrefix, defaultTopicName, p, memoryUnlimited, "t")
defer cleanup()
// No inflight
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
// Timeout
if err := sink.EmitRow(ctx, topic(`t`), []byte(`1`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m1 := <-p.inputCh
for i := 0; i < 2; i++ {
timeoutCtx, cancel := context.WithTimeout(ctx, time.Millisecond)
defer cancel()
if err := sink.Flush(timeoutCtx); !testutils.IsError(
err, `context deadline exceeded`,
) {
t.Fatalf(`expected "context deadline exceeded" error got: %+v`, err)
}
}
go func() { p.successesCh <- m1 }()
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
// Check no inflight again now that we've sent something
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
// Mixed success and error.
if err := sink.EmitRow(ctx, topic(`t`), []byte(`2`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m2 := <-p.inputCh
if err := sink.EmitRow(ctx, topic(`t`), []byte(`3`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m3 := <-p.inputCh
if err := sink.EmitRow(ctx, topic(`t`), []byte(`4`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m4 := <-p.inputCh
go func() { p.successesCh <- m2 }()
go func() {
p.errorsCh <- &sarama.ProducerError{
Msg: m3,
Err: errors.New("m3"),
}
}()
go func() { p.successesCh <- m4 }()
if err := sink.Flush(ctx); !testutils.IsError(err, `m3`) {
t.Fatalf(`expected "m3" error got: %+v`, err)
}
// Check simple success again after error
if err := sink.EmitRow(ctx, topic(`t`), []byte(`5`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m5 := <-p.inputCh
go func() { p.successesCh <- m5 }()
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
}
func TestKafkaSinkEscaping(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
p := newAsyncProducerMock(1)
sink, cleanup := makeTestKafkaSink(
t, noTopicPrefix, defaultTopicName, p, memoryUnlimited, `☃`)
defer cleanup()
if err := sink.EmitRow(ctx, topic(`☃`), []byte(`k☃`), []byte(`v☃`), zeroTS); err != nil {
t.Fatal(err)
}
m := <-p.inputCh
require.Equal(t, `_u2603_`, m.Topic)
require.Equal(t, sarama.ByteEncoder(`k☃`), m.Key)
require.Equal(t, sarama.ByteEncoder(`v☃`), m.Value)
}
func TestKafkaTopicNameProvided(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
const topicOverride = "general"
p := newAsyncProducerMock(1)
sink, cleanup := makeTestKafkaSink(
t, noTopicPrefix, topicOverride, p, memoryUnlimited, "particular0", "particular1")
defer cleanup()
//all messages go to the general topic
require.NoError(t, sink.EmitRow(ctx, topic("particular0"), []byte(`k☃`), []byte(`v☃`), zeroTS))
m := <-p.inputCh
require.Equal(t, topicOverride, m.Topic)
}
func TestKafkaTopicNameWithPrefix(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
p := newAsyncProducerMock(1)
const topicPrefix = "prefix-"
const topicOverride = "☃"
sink, clenaup := makeTestKafkaSink(
t, topicPrefix, topicOverride, p, memoryUnlimited, "particular0", "particular1")
defer clenaup()
//the prefix is applied and the name is escaped
require.NoError(t, sink.EmitRow(ctx, topic("particular0"), []byte(`k☃`), []byte(`v☃`), zeroTS))
m := <-p.inputCh
require.Equal(t, `prefix-_u2603_`, m.Topic)
}
// goos: darwin
// goarch: amd64
// pkg: github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl
// cpu: Intel(R) Core(TM) i9-9980HK CPU @ 2.40GHz
// BenchmarkEmitRow-16 573620 1779 ns/op 235 B/op 6 allocs/op
func BenchmarkEmitRow(b *testing.B) {
defer leaktest.AfterTest(b)()
defer log.Scope(b).Close(b)
ctx := context.Background()
p := newAsyncProducerMock(unbuffered)
const tableName = `defaultdb.public.funky_table☃`
topic := topic(tableName)
sink, cleanup := makeTestKafkaSink(b, noTopicPrefix, defaultTopicName, p, memoryUnlimited, tableName)
stopConsume := p.consumeAndSucceed()
defer func() {
stopConsume()
cleanup()
}()
b.ResetTimer()
for i := 0; i < b.N; i++ {
require.NoError(b, sink.EmitRow(ctx, topic, []byte(`k☃`), []byte(`v☃`), hlc.Timestamp{}))
}
b.ReportAllocs()
}
type testEncoder struct{}
func (testEncoder) EncodeKey(context.Context, encodeRow) ([]byte, error) { panic(`unimplemented`) }
func (testEncoder) EncodeValue(context.Context, encodeRow) ([]byte, error) { panic(`unimplemented`) }
func (testEncoder) EncodeResolvedTimestamp(
_ context.Context, _ string, ts hlc.Timestamp,
) ([]byte, error) {
return []byte(ts.String()), nil
}
func TestSQLSink(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
overrideTopic := func(name string) tableDescriptorTopic {
id, _ := strconv.ParseUint(name, 36, 64)
return tableDescriptorTopic{
tabledesc.NewBuilder(&descpb.TableDescriptor{Name: name, ID: descpb.ID(id)}).BuildImmutableTable()}
}
ctx := context.Background()
s, sqlDBRaw, _ := serverutils.StartServer(t, base.TestServerArgs{UseDatabase: "d"})
defer s.Stopper().Stop(ctx)
sqlDB := sqlutils.MakeSQLRunner(sqlDBRaw)
sqlDB.Exec(t, `CREATE DATABASE d`)
sinkURL, cleanup := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(security.RootUser))
defer cleanup()
sinkURL.Path = `d`
fooTopic := overrideTopic(`foo`)
barTopic := overrideTopic(`bar`)
targets := jobspb.ChangefeedTargets{
fooTopic.GetID(): jobspb.ChangefeedTarget{StatementTimeName: `foo`},
barTopic.GetID(): jobspb.ChangefeedTarget{StatementTimeName: `bar`},
}
sink, err := makeSQLSink(sinkURL.String(), `sink`, targets)
require.NoError(t, err)
defer func() { require.NoError(t, sink.Close()) }()
// Empty
require.NoError(t, sink.Flush(ctx))
// Undeclared topic
require.EqualError(t,
sink.EmitRow(ctx, overrideTopic(`nope`), nil, nil, zeroTS), `cannot emit to undeclared topic: `)
// With one row, nothing flushes until Flush is called.
require.NoError(t, sink.EmitRow(ctx, fooTopic, []byte(`k1`), []byte(`v0`), zeroTS))
sqlDB.CheckQueryResults(t, `SELECT key, value FROM sink ORDER BY PRIMARY KEY sink`,
[][]string{},
)
require.NoError(t, sink.Flush(ctx))
sqlDB.CheckQueryResults(t, `SELECT key, value FROM sink ORDER BY PRIMARY KEY sink`,
[][]string{{`k1`, `v0`}},
)
sqlDB.Exec(t, `TRUNCATE sink`)
// Verify the implicit flushing
sqlDB.CheckQueryResults(t, `SELECT count(*) FROM sink`, [][]string{{`0`}})
for i := 0; i < sqlSinkRowBatchSize+1; i++ {
require.NoError(t,
sink.EmitRow(ctx, fooTopic, []byte(`k1`), []byte(`v`+strconv.Itoa(i)), zeroTS))
}
// Should have auto flushed after sqlSinkRowBatchSize
sqlDB.CheckQueryResults(t, `SELECT count(*) FROM sink`, [][]string{{`3`}})
require.NoError(t, sink.Flush(ctx))
sqlDB.CheckQueryResults(t, `SELECT count(*) FROM sink`, [][]string{{`4`}})
sqlDB.Exec(t, `TRUNCATE sink`)
// Two tables interleaved in time
require.NoError(t, sink.EmitRow(ctx, fooTopic, []byte(`kfoo`), []byte(`v0`), zeroTS))
require.NoError(t, sink.EmitRow(ctx, barTopic, []byte(`kbar`), []byte(`v0`), zeroTS))
require.NoError(t, sink.EmitRow(ctx, fooTopic, []byte(`kfoo`), []byte(`v1`), zeroTS))
require.NoError(t, sink.Flush(ctx))
sqlDB.CheckQueryResults(t, `SELECT topic, key, value FROM sink ORDER BY PRIMARY KEY sink`,
[][]string{{`bar`, `kbar`, `v0`}, {`foo`, `kfoo`, `v0`}, {`foo`, `kfoo`, `v1`}},
)
sqlDB.Exec(t, `TRUNCATE sink`)
// Multiple keys interleaved in time. Use sqlSinkNumPartitions+1 keys to
// guarantee that at lease two of them end up in the same partition.
for i := 0; i < sqlSinkNumPartitions+1; i++ {
require.NoError(t,
sink.EmitRow(ctx, fooTopic, []byte(`v`+strconv.Itoa(i)), []byte(`v0`), zeroTS))
}
for i := 0; i < sqlSinkNumPartitions+1; i++ {
require.NoError(t,
sink.EmitRow(ctx, fooTopic, []byte(`v`+strconv.Itoa(i)), []byte(`v1`), zeroTS))
}
require.NoError(t, sink.Flush(ctx))
sqlDB.CheckQueryResults(t, `SELECT partition, key, value FROM sink ORDER BY PRIMARY KEY sink`,
[][]string{
{`0`, `v3`, `v0`},
{`0`, `v3`, `v1`},
{`1`, `v1`, `v0`},
{`1`, `v2`, `v0`},
{`1`, `v1`, `v1`},
{`1`, `v2`, `v1`},
{`2`, `v0`, `v0`},
{`2`, `v0`, `v1`},
},
)
sqlDB.Exec(t, `TRUNCATE sink`)
// Emit resolved
var e testEncoder
require.NoError(t, sink.EmitResolvedTimestamp(ctx, e, zeroTS))
require.NoError(t, sink.EmitRow(ctx, fooTopic, []byte(`foo0`), []byte(`v0`), zeroTS))
require.NoError(t, sink.EmitResolvedTimestamp(ctx, e, hlc.Timestamp{WallTime: 1}))
require.NoError(t, sink.Flush(ctx))
sqlDB.CheckQueryResults(t,
`SELECT topic, partition, key, value, resolved FROM sink ORDER BY PRIMARY KEY sink`,
[][]string{
{`bar`, `0`, ``, ``, `0,0`},
{`bar`, `0`, ``, ``, `0.000000001,0`},
{`bar`, `1`, ``, ``, `0,0`},
{`bar`, `1`, ``, ``, `0.000000001,0`},
{`bar`, `2`, ``, ``, `0,0`},
{`bar`, `2`, ``, ``, `0.000000001,0`},
{`foo`, `0`, ``, ``, `0,0`},
{`foo`, `0`, `foo0`, `v0`, ``},
{`foo`, `0`, ``, ``, `0.000000001,0`},
{`foo`, `1`, ``, ``, `0,0`},
{`foo`, `1`, ``, ``, `0.000000001,0`},
{`foo`, `2`, ``, ``, `0,0`},
{`foo`, `2`, ``, ``, `0.000000001,0`},
},
)
}
func TestSaramaConfigOptionParsing(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
opts := make(map[string]string)
cfg, err := getSaramaConfig(opts)
require.NoError(t, err)
require.Equal(t, defaultSaramaConfig, cfg)
expected := &saramaConfig{}
expected.Flush.MaxMessages = 1000
expected.Flush.Frequency = jsonDuration(time.Second)
opts[changefeedbase.OptKafkaSinkConfig] = `{"Flush": {"MaxMessages": 1000, "Frequency": "1s"}}`
cfg, err = getSaramaConfig(opts)
require.NoError(t, err)
require.Equal(t, expected, cfg)
}
func TestKafkaSinkTracksMemory(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
memCapacity := mon.DefaultPoolAllocationSize
// Use fake kafka sink which "consumes" all messages on its input channel,
// but does not acknowledge them automatically (i.e. slow sink)
p := newAsyncProducerMock(unbuffered)
stopConsume := p.consume()
sink, cleanup := makeTestKafkaSink(
t, noTopicPrefix, defaultTopicName, p, memCapacity, "t")
defer func() {
stopConsume()
cleanup()
}()
// No inflight
require.NoError(t, sink.Flush(ctx))
// Emit few messages
rnd, _ := randutil.NewTestPseudoRand()
key := randutil.RandBytes(rnd, 1+rnd.Intn(64))
val := randutil.RandBytes(rnd, 1+rnd.Intn(512))
kvLen := int64(len(key)) + int64(len(val))
testTopic := topic(`t`)
for i := 0; i < 10; i++ {
require.NoError(t, sink.EmitRow(ctx, testTopic, key, val, zeroTS))
}
memUsed := func() int64 {
sink.mu.Lock()
defer sink.mu.Unlock()
return sink.mu.mem.Used()
}
require.Equal(t, 10*kvLen, memUsed())
// Acknowledge outstanding messages, and flush.
p.acknowledge(10, p.successesCh)
require.NoError(t, sink.Flush(ctx))
require.EqualValues(t, 0, p.outstanding())
// Try emitting resolved timestamp. This message type is different from the
// regular messages since it doesn't have Key set.
// We bypass majority of EmitResolvedTimestamp logic since we don't have
// a real kafka client instantiated. Instead, we call emitMessage directly.
reg := makeTestSchemaRegistry()
defer reg.Close()
opts := map[string]string{
changefeedbase.OptEnvelope: string(changefeedbase.OptEnvelopeWrapped),
changefeedbase.OptConfluentSchemaRegistry: reg.server.URL,
}
encoder, err := newConfluentAvroEncoder(opts, makeChangefeedTargets("t"))
require.NoError(t, err)
payload, err := encoder.EncodeResolvedTimestamp(ctx, "t", hlc.Timestamp{})
require.NoError(t, err)
msg := &sarama.ProducerMessage{
Topic: "t",
Key: nil,
Value: sarama.ByteEncoder(payload),
}
require.NoError(t, sink.emitMessage(ctx, msg))
p.acknowledge(1, p.successesCh)
require.NoError(t, sink.Flush(ctx))
require.EqualValues(t, 0, p.outstanding())
// Try to emit more than we can handle.
expectOverflow := memCapacity / kvLen
for err == nil {
err = sink.EmitRow(ctx, testTopic, key, val, zeroTS)
}
require.Regexp(t, `memory budget exceeded`, err)
// We failed to allocate more memory, but we should have used
// memory for the expectOverflow key/values.
require.EqualValues(t, expectOverflow*kvLen, memUsed())
}
| {
ch <- m
} | conditional_block |
sink_test.go | // Copyright 2018 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package changefeedccl
import (
"context"
"math"
"net/url"
"strconv"
"sync"
"testing"
"time"
"github.com/Shopify/sarama"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/changefeedbase"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/mon"
"github.com/cockroachdb/cockroach/pkg/util/randutil"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/require"
)
var zeroTS hlc.Timestamp
type asyncProducerMock struct {
inputCh chan *sarama.ProducerMessage
successesCh chan *sarama.ProducerMessage
errorsCh chan *sarama.ProducerError
mu struct {
syncutil.Mutex
outstanding []*sarama.ProducerMessage
}
}
const unbuffered = 0
func newAsyncProducerMock(bufSize int) *asyncProducerMock {
return &asyncProducerMock{
inputCh: make(chan *sarama.ProducerMessage, bufSize),
successesCh: make(chan *sarama.ProducerMessage, bufSize),
errorsCh: make(chan *sarama.ProducerError, bufSize),
}
}
func (p *asyncProducerMock) Input() chan<- *sarama.ProducerMessage { return p.inputCh }
func (p *asyncProducerMock) Successes() <-chan *sarama.ProducerMessage { return p.successesCh }
func (p *asyncProducerMock) Errors() <-chan *sarama.ProducerError { return p.errorsCh }
func (p *asyncProducerMock) AsyncClose() { panic(`unimplemented`) }
func (p *asyncProducerMock) Close() error {
close(p.inputCh)
close(p.successesCh)
close(p.errorsCh)
return nil
}
// consumeAndSucceed consumes input messages and sends them to successes channel.
// Returns function that must be called to stop this consumer
// to clean up. The cleanup function must be called before closing asyncProducerMock.
func (p *asyncProducerMock) consumeAndSucceed() (cleanup func()) {
var wg sync.WaitGroup
wg.Add(1)
done := make(chan struct{})
go func() {
defer wg.Done()
for {
select {
case <-done:
return
case m := <-p.inputCh:
p.successesCh <- m
}
}
}()
return func() {
close(done)
wg.Wait()
}
}
// consume consumes input messages but does not acknowledge neither successes, nor errors.
// In essence, this simulates an unreachable kafka sink.
// Use acknowledge methods to acknowledge successes or errors.
// Returns a function that must be called to stop this consumer
// to clean up. The cleanup function must be called before closing asyncProducerMock.
func (p *asyncProducerMock) consume() (cleanup func()) {
var wg sync.WaitGroup
wg.Add(1)
done := make(chan struct{})
go func() {
defer wg.Done()
for {
select {
case <-done:
return
case m := <-p.inputCh:
p.mu.Lock()
p.mu.outstanding = append(p.mu.outstanding, m)
p.mu.Unlock()
}
}
}()
return func() {
close(done)
wg.Wait()
}
}
// acknowledge sends acknowledgements on the specified channel
// for each of the outstanding messages.
func (p *asyncProducerMock) acknowledge(n int, ch chan *sarama.ProducerMessage) {
for n > 0 {
var outstanding []*sarama.ProducerMessage
p.mu.Lock()
outstanding = append(outstanding, p.mu.outstanding...)
p.mu.outstanding = p.mu.outstanding[:0]
p.mu.Unlock()
for _, m := range outstanding {
ch <- m
}
n -= len(outstanding)
}
}
// outstanding returns the number of un-acknowledged messages.
func (p *asyncProducerMock) | () int {
p.mu.Lock()
defer p.mu.Unlock()
return len(p.mu.outstanding)
}
func topic(name string) tableDescriptorTopic {
return tableDescriptorTopic{tabledesc.NewBuilder(&descpb.TableDescriptor{Name: name}).BuildImmutableTable()}
}
const memoryUnlimited int64 = math.MaxInt64
const noTopicPrefix = ""
const defaultTopicName = ""
func getBoundAccountWithBudget(budget int64) (account mon.BoundAccount, cleanup func()) {
mm := mon.NewMonitorWithLimit(
"test-mm", mon.MemoryResource, budget,
nil, nil, mon.DefaultPoolAllocationSize, 100,
cluster.MakeTestingClusterSettings())
mm.Start(context.Background(), nil, mon.MakeStandaloneBudget(budget))
return mm.MakeBoundAccount(), func() { mm.Stop(context.Background()) }
}
func makeTestKafkaSink(
t testing.TB,
topicPrefix string,
topicNameOverride string,
p sarama.AsyncProducer,
budget int64,
targetNames ...string,
) (s *kafkaSink, cleanup func()) {
mem, release := getBoundAccountWithBudget(budget)
targets := makeChangefeedTargets(targetNames...)
s = &kafkaSink{
ctx: context.Background(),
topics: makeTopicsMap(topicPrefix, topicNameOverride, targets),
producer: p,
}
s.mu.mem = mem
s.start()
return s, func() {
require.NoError(t, s.Close())
release()
}
}
func makeChangefeedTargets(targetNames ...string) jobspb.ChangefeedTargets {
targets := make(jobspb.ChangefeedTargets, len(targetNames))
for i, name := range targetNames {
targets[descpb.ID(i)] = jobspb.ChangefeedTarget{StatementTimeName: name}
}
return targets
}
func TestKafkaSink(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
p := newAsyncProducerMock(1)
sink, cleanup := makeTestKafkaSink(
t, noTopicPrefix, defaultTopicName, p, memoryUnlimited, "t")
defer cleanup()
// No inflight
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
// Timeout
if err := sink.EmitRow(ctx, topic(`t`), []byte(`1`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m1 := <-p.inputCh
for i := 0; i < 2; i++ {
timeoutCtx, cancel := context.WithTimeout(ctx, time.Millisecond)
defer cancel()
if err := sink.Flush(timeoutCtx); !testutils.IsError(
err, `context deadline exceeded`,
) {
t.Fatalf(`expected "context deadline exceeded" error got: %+v`, err)
}
}
go func() { p.successesCh <- m1 }()
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
// Check no inflight again now that we've sent something
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
// Mixed success and error.
if err := sink.EmitRow(ctx, topic(`t`), []byte(`2`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m2 := <-p.inputCh
if err := sink.EmitRow(ctx, topic(`t`), []byte(`3`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m3 := <-p.inputCh
if err := sink.EmitRow(ctx, topic(`t`), []byte(`4`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m4 := <-p.inputCh
go func() { p.successesCh <- m2 }()
go func() {
p.errorsCh <- &sarama.ProducerError{
Msg: m3,
Err: errors.New("m3"),
}
}()
go func() { p.successesCh <- m4 }()
if err := sink.Flush(ctx); !testutils.IsError(err, `m3`) {
t.Fatalf(`expected "m3" error got: %+v`, err)
}
// Check simple success again after error
if err := sink.EmitRow(ctx, topic(`t`), []byte(`5`), nil, zeroTS); err != nil {
t.Fatal(err)
}
m5 := <-p.inputCh
go func() { p.successesCh <- m5 }()
if err := sink.Flush(ctx); err != nil {
t.Fatal(err)
}
}
func TestKafkaSinkEscaping(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
p := newAsyncProducerMock(1)
sink, cleanup := makeTestKafkaSink(
t, noTopicPrefix, defaultTopicName, p, memoryUnlimited, `☃`)
defer cleanup()
if err := sink.EmitRow(ctx, topic(`☃`), []byte(`k☃`), []byte(`v☃`), zeroTS); err != nil {
t.Fatal(err)
}
m := <-p.inputCh
require.Equal(t, `_u2603_`, m.Topic)
require.Equal(t, sarama.ByteEncoder(`k☃`), m.Key)
require.Equal(t, sarama.ByteEncoder(`v☃`), m.Value)
}
func TestKafkaTopicNameProvided(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
const topicOverride = "general"
p := newAsyncProducerMock(1)
sink, cleanup := makeTestKafkaSink(
t, noTopicPrefix, topicOverride, p, memoryUnlimited, "particular0", "particular1")
defer cleanup()
//all messages go to the general topic
require.NoError(t, sink.EmitRow(ctx, topic("particular0"), []byte(`k☃`), []byte(`v☃`), zeroTS))
m := <-p.inputCh
require.Equal(t, topicOverride, m.Topic)
}
func TestKafkaTopicNameWithPrefix(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
p := newAsyncProducerMock(1)
const topicPrefix = "prefix-"
const topicOverride = "☃"
sink, clenaup := makeTestKafkaSink(
t, topicPrefix, topicOverride, p, memoryUnlimited, "particular0", "particular1")
defer clenaup()
//the prefix is applied and the name is escaped
require.NoError(t, sink.EmitRow(ctx, topic("particular0"), []byte(`k☃`), []byte(`v☃`), zeroTS))
m := <-p.inputCh
require.Equal(t, `prefix-_u2603_`, m.Topic)
}
// goos: darwin
// goarch: amd64
// pkg: github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl
// cpu: Intel(R) Core(TM) i9-9980HK CPU @ 2.40GHz
// BenchmarkEmitRow-16 573620 1779 ns/op 235 B/op 6 allocs/op
func BenchmarkEmitRow(b *testing.B) {
defer leaktest.AfterTest(b)()
defer log.Scope(b).Close(b)
ctx := context.Background()
p := newAsyncProducerMock(unbuffered)
const tableName = `defaultdb.public.funky_table☃`
topic := topic(tableName)
sink, cleanup := makeTestKafkaSink(b, noTopicPrefix, defaultTopicName, p, memoryUnlimited, tableName)
stopConsume := p.consumeAndSucceed()
defer func() {
stopConsume()
cleanup()
}()
b.ResetTimer()
for i := 0; i < b.N; i++ {
require.NoError(b, sink.EmitRow(ctx, topic, []byte(`k☃`), []byte(`v☃`), hlc.Timestamp{}))
}
b.ReportAllocs()
}
type testEncoder struct{}
func (testEncoder) EncodeKey(context.Context, encodeRow) ([]byte, error) { panic(`unimplemented`) }
func (testEncoder) EncodeValue(context.Context, encodeRow) ([]byte, error) { panic(`unimplemented`) }
func (testEncoder) EncodeResolvedTimestamp(
_ context.Context, _ string, ts hlc.Timestamp,
) ([]byte, error) {
return []byte(ts.String()), nil
}
func TestSQLSink(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
overrideTopic := func(name string) tableDescriptorTopic {
id, _ := strconv.ParseUint(name, 36, 64)
return tableDescriptorTopic{
tabledesc.NewBuilder(&descpb.TableDescriptor{Name: name, ID: descpb.ID(id)}).BuildImmutableTable()}
}
ctx := context.Background()
s, sqlDBRaw, _ := serverutils.StartServer(t, base.TestServerArgs{UseDatabase: "d"})
defer s.Stopper().Stop(ctx)
sqlDB := sqlutils.MakeSQLRunner(sqlDBRaw)
sqlDB.Exec(t, `CREATE DATABASE d`)
sinkURL, cleanup := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(security.RootUser))
defer cleanup()
sinkURL.Path = `d`
fooTopic := overrideTopic(`foo`)
barTopic := overrideTopic(`bar`)
targets := jobspb.ChangefeedTargets{
fooTopic.GetID(): jobspb.ChangefeedTarget{StatementTimeName: `foo`},
barTopic.GetID(): jobspb.ChangefeedTarget{StatementTimeName: `bar`},
}
sink, err := makeSQLSink(sinkURL.String(), `sink`, targets)
require.NoError(t, err)
defer func() { require.NoError(t, sink.Close()) }()
// Empty
require.NoError(t, sink.Flush(ctx))
// Undeclared topic
require.EqualError(t,
sink.EmitRow(ctx, overrideTopic(`nope`), nil, nil, zeroTS), `cannot emit to undeclared topic: `)
// With one row, nothing flushes until Flush is called.
require.NoError(t, sink.EmitRow(ctx, fooTopic, []byte(`k1`), []byte(`v0`), zeroTS))
sqlDB.CheckQueryResults(t, `SELECT key, value FROM sink ORDER BY PRIMARY KEY sink`,
[][]string{},
)
require.NoError(t, sink.Flush(ctx))
sqlDB.CheckQueryResults(t, `SELECT key, value FROM sink ORDER BY PRIMARY KEY sink`,
[][]string{{`k1`, `v0`}},
)
sqlDB.Exec(t, `TRUNCATE sink`)
// Verify the implicit flushing
sqlDB.CheckQueryResults(t, `SELECT count(*) FROM sink`, [][]string{{`0`}})
for i := 0; i < sqlSinkRowBatchSize+1; i++ {
require.NoError(t,
sink.EmitRow(ctx, fooTopic, []byte(`k1`), []byte(`v`+strconv.Itoa(i)), zeroTS))
}
// Should have auto flushed after sqlSinkRowBatchSize
sqlDB.CheckQueryResults(t, `SELECT count(*) FROM sink`, [][]string{{`3`}})
require.NoError(t, sink.Flush(ctx))
sqlDB.CheckQueryResults(t, `SELECT count(*) FROM sink`, [][]string{{`4`}})
sqlDB.Exec(t, `TRUNCATE sink`)
// Two tables interleaved in time
require.NoError(t, sink.EmitRow(ctx, fooTopic, []byte(`kfoo`), []byte(`v0`), zeroTS))
require.NoError(t, sink.EmitRow(ctx, barTopic, []byte(`kbar`), []byte(`v0`), zeroTS))
require.NoError(t, sink.EmitRow(ctx, fooTopic, []byte(`kfoo`), []byte(`v1`), zeroTS))
require.NoError(t, sink.Flush(ctx))
sqlDB.CheckQueryResults(t, `SELECT topic, key, value FROM sink ORDER BY PRIMARY KEY sink`,
[][]string{{`bar`, `kbar`, `v0`}, {`foo`, `kfoo`, `v0`}, {`foo`, `kfoo`, `v1`}},
)
sqlDB.Exec(t, `TRUNCATE sink`)
// Multiple keys interleaved in time. Use sqlSinkNumPartitions+1 keys to
// guarantee that at lease two of them end up in the same partition.
for i := 0; i < sqlSinkNumPartitions+1; i++ {
require.NoError(t,
sink.EmitRow(ctx, fooTopic, []byte(`v`+strconv.Itoa(i)), []byte(`v0`), zeroTS))
}
for i := 0; i < sqlSinkNumPartitions+1; i++ {
require.NoError(t,
sink.EmitRow(ctx, fooTopic, []byte(`v`+strconv.Itoa(i)), []byte(`v1`), zeroTS))
}
require.NoError(t, sink.Flush(ctx))
sqlDB.CheckQueryResults(t, `SELECT partition, key, value FROM sink ORDER BY PRIMARY KEY sink`,
[][]string{
{`0`, `v3`, `v0`},
{`0`, `v3`, `v1`},
{`1`, `v1`, `v0`},
{`1`, `v2`, `v0`},
{`1`, `v1`, `v1`},
{`1`, `v2`, `v1`},
{`2`, `v0`, `v0`},
{`2`, `v0`, `v1`},
},
)
sqlDB.Exec(t, `TRUNCATE sink`)
// Emit resolved
var e testEncoder
require.NoError(t, sink.EmitResolvedTimestamp(ctx, e, zeroTS))
require.NoError(t, sink.EmitRow(ctx, fooTopic, []byte(`foo0`), []byte(`v0`), zeroTS))
require.NoError(t, sink.EmitResolvedTimestamp(ctx, e, hlc.Timestamp{WallTime: 1}))
require.NoError(t, sink.Flush(ctx))
sqlDB.CheckQueryResults(t,
`SELECT topic, partition, key, value, resolved FROM sink ORDER BY PRIMARY KEY sink`,
[][]string{
{`bar`, `0`, ``, ``, `0,0`},
{`bar`, `0`, ``, ``, `0.000000001,0`},
{`bar`, `1`, ``, ``, `0,0`},
{`bar`, `1`, ``, ``, `0.000000001,0`},
{`bar`, `2`, ``, ``, `0,0`},
{`bar`, `2`, ``, ``, `0.000000001,0`},
{`foo`, `0`, ``, ``, `0,0`},
{`foo`, `0`, `foo0`, `v0`, ``},
{`foo`, `0`, ``, ``, `0.000000001,0`},
{`foo`, `1`, ``, ``, `0,0`},
{`foo`, `1`, ``, ``, `0.000000001,0`},
{`foo`, `2`, ``, ``, `0,0`},
{`foo`, `2`, ``, ``, `0.000000001,0`},
},
)
}
func TestSaramaConfigOptionParsing(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
opts := make(map[string]string)
cfg, err := getSaramaConfig(opts)
require.NoError(t, err)
require.Equal(t, defaultSaramaConfig, cfg)
expected := &saramaConfig{}
expected.Flush.MaxMessages = 1000
expected.Flush.Frequency = jsonDuration(time.Second)
opts[changefeedbase.OptKafkaSinkConfig] = `{"Flush": {"MaxMessages": 1000, "Frequency": "1s"}}`
cfg, err = getSaramaConfig(opts)
require.NoError(t, err)
require.Equal(t, expected, cfg)
}
func TestKafkaSinkTracksMemory(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
memCapacity := mon.DefaultPoolAllocationSize
// Use fake kafka sink which "consumes" all messages on its input channel,
// but does not acknowledge them automatically (i.e. slow sink)
p := newAsyncProducerMock(unbuffered)
stopConsume := p.consume()
sink, cleanup := makeTestKafkaSink(
t, noTopicPrefix, defaultTopicName, p, memCapacity, "t")
defer func() {
stopConsume()
cleanup()
}()
// No inflight
require.NoError(t, sink.Flush(ctx))
// Emit few messages
rnd, _ := randutil.NewTestPseudoRand()
key := randutil.RandBytes(rnd, 1+rnd.Intn(64))
val := randutil.RandBytes(rnd, 1+rnd.Intn(512))
kvLen := int64(len(key)) + int64(len(val))
testTopic := topic(`t`)
for i := 0; i < 10; i++ {
require.NoError(t, sink.EmitRow(ctx, testTopic, key, val, zeroTS))
}
memUsed := func() int64 {
sink.mu.Lock()
defer sink.mu.Unlock()
return sink.mu.mem.Used()
}
require.Equal(t, 10*kvLen, memUsed())
// Acknowledge outstanding messages, and flush.
p.acknowledge(10, p.successesCh)
require.NoError(t, sink.Flush(ctx))
require.EqualValues(t, 0, p.outstanding())
// Try emitting resolved timestamp. This message type is different from the
// regular messages since it doesn't have Key set.
// We bypass majority of EmitResolvedTimestamp logic since we don't have
// a real kafka client instantiated. Instead, we call emitMessage directly.
reg := makeTestSchemaRegistry()
defer reg.Close()
opts := map[string]string{
changefeedbase.OptEnvelope: string(changefeedbase.OptEnvelopeWrapped),
changefeedbase.OptConfluentSchemaRegistry: reg.server.URL,
}
encoder, err := newConfluentAvroEncoder(opts, makeChangefeedTargets("t"))
require.NoError(t, err)
payload, err := encoder.EncodeResolvedTimestamp(ctx, "t", hlc.Timestamp{})
require.NoError(t, err)
msg := &sarama.ProducerMessage{
Topic: "t",
Key: nil,
Value: sarama.ByteEncoder(payload),
}
require.NoError(t, sink.emitMessage(ctx, msg))
p.acknowledge(1, p.successesCh)
require.NoError(t, sink.Flush(ctx))
require.EqualValues(t, 0, p.outstanding())
// Try to emit more than we can handle.
expectOverflow := memCapacity / kvLen
for err == nil {
err = sink.EmitRow(ctx, testTopic, key, val, zeroTS)
}
require.Regexp(t, `memory budget exceeded`, err)
// We failed to allocate more memory, but we should have used
// memory for the expectOverflow key/values.
require.EqualValues(t, expectOverflow*kvLen, memUsed())
}
| outstanding | identifier_name |
amqp.rs | // Copyright 2020-2021, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// #![cfg_attr(coverage, no_coverage)]
//! # AMQP Offramp
//!
//! The `amqp` offramp allows producing events to an amqp broker.
use crate::channel::{bounded, Receiver};
use crate::sink::prelude::*;
use halfbrown::HashMap;
use lapin::{
options::BasicPublishOptions, publisher_confirm::Confirmation, BasicProperties, Channel,
Connection, ConnectionProperties, PromiseChain,
};
use serde::Deserialize;
use std::{fmt, time::Instant};
use tremor_common::url::TremorUrl;
#[derive(Deserialize, Debug, Clone)]
pub(crate) struct Config {
pub(crate) amqp_addr: String,
#[serde(default = "Default::default")]
routing_key: String,
#[serde(default = "Default::default")]
exchange: String,
publish_options: BasicPublishOptions,
// headers to use for the messages
#[serde(default = "Default::default")]
pub(crate) headers: HashMap<String, Vec<String>>,
}
impl Config {
async fn channel(&self) -> PromiseChain<Channel> {
match Connection::connect(&self.amqp_addr, ConnectionProperties::default()).await {
Ok(connection) => connection.create_channel(),
Err(error) => PromiseChain::new_with_data(Err(error)),
}
}
}
impl ConfigImpl for Config {}
/// Amqp offramp connector
pub(crate) struct Amqp {
sink_url: TremorUrl,
config: Config,
postprocessors: Postprocessors,
reply_channel: Sender<sink::Reply>,
channel: Option<Channel>,
error_rx: Receiver<()>,
error_tx: Sender<()>,
}
impl fmt::Debug for Amqp {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"[Sink::{}] RoutingKey: {}",
&self.sink_url, self.config.routing_key
)
}
}
pub(crate) struct Builder {}
impl offramp::Builder for Builder {
fn from_config(&self, config: &Option<OpConfig>) -> Result<Box<dyn Offramp>> {
if let Some(config) = config {
let config: Config = Config::new(config)?;
let (dummy_tx, _) = bounded(1);
let (error_tx, error_rx) = bounded(qsize());
Ok(SinkManager::new_box(Amqp {
sink_url: TremorUrl::from_offramp_id("amqp")?, // dummy
config,
postprocessors: vec![],
reply_channel: dummy_tx,
channel: None,
error_rx,
error_tx,
}))
} else {
Err("Amqp offramp requires a config".into())
}
}
}
impl Amqp {
async fn handle_channel(&mut self) -> Result<Option<&Channel>> {
while let Ok(()) = self.error_rx.try_recv() {
self.channel = None;
}
if self.channel.is_none() {
match self.config.channel().await.await {
Ok(channel) => self.channel = Some(channel),
Err(error) => return Err(error.into()),
}
}
return Ok(self.channel.as_ref());
}
}
#[async_trait::async_trait]
impl Sink for Amqp {
async fn on_event(
&mut self,
_input: &str,
codec: &mut dyn Codec,
_codec_map: &HashMap<String, Box<dyn Codec>>,
event: Event,
) -> ResultVec {
self.handle_channel().await?;
let ingest_ns = event.ingest_ns;
let processing_start = Instant::now();
/*
// evaluate here to avoid borrowing again while borrowed.
let config_reply = self.config.reply.as_deref();
let op_meta = &event.op_meta;
self.merged_meta.merge(op_meta.clone());
*/
let insight_event = event.insight_ack();
if let Some(channel) = &mut self.channel {
for (value, _) in event.value_meta_iter() {
let encoded = codec.encode(value)?;
let processed =
postprocess(self.postprocessors.as_mut_slice(), ingest_ns, encoded)?;
//let headers = meta.get("nats").and_then(|v| v.get_object("headers"));
for payload in processed {
/*
// prepare message reply
let message_reply = reply.or(config_reply);
*/
// prepare message headers
let properties = BasicProperties::default();
/*
let mut key_val: Vec<(&str, &str)> = Vec::with_capacity(
self.config.headers.len() + headers.map(HashMap::len).unwrap_or_default(),
);
for (key, val) in &self.config.headers {
for ele in val.iter() {
key_val.push((key.as_str(), ele.as_str()));
}
}
if let Some(headers) = headers {
for (key, val) in
headers.iter().filter_map(|(k, v)| Some((k, v.as_array()?)))
{
for ele in val.iter().filter_map(value_trait::ValueAccess::as_str) {
key_val.push((key, ele));
}
}
}
let message_headers = if key_val.is_empty() {
None
} else {
Some(Headers::from_iter(key_val))
};
*/
let publish_result = channel
.basic_publish(
self.config.exchange.as_str(),
self.config.routing_key.as_str(),
self.config.publish_options,
payload,
properties,
)
.await?
.await?;
match publish_result {
Confirmation::NotRequested | Confirmation::Ack(_) => {
if event.transactional {
let mut insight = insight_event.clone();
insight.cb = CbAction::Ack;
// we hopefully enver wait more then u64 ... if we do we got
// bigger problems
#[allow(clippy::cast_possible_truncation)]
let time = processing_start.elapsed().as_millis() as u64;
let mut m = Object::with_capacity(1);
m.insert("time".into(), time.into());
insight.data = (Value::null(), m).into();
self.reply_channel
.send(sink::Reply::Insight(insight.clone()))
.await?;
}
}
Confirmation::Nack(err) => {
if let Some(e) = err {
error!(
"[Sink::{}] failed to send message: {} {}",
&self.sink_url, e.reply_code, e.reply_text
);
} else {
error!(
"[Sink::{}] failed to send message: unknown error",
&self.sink_url
);
}
if self.error_tx.send(()).await.is_err() {
error!(
"[Sink::{}] Error notifying the system about amqp error",
&self.sink_url
);
}
if event.transactional {
let mut insight = insight_event.clone();
insight.cb = CbAction::Fail;
self.reply_channel
.send(sink::Reply::Response(ERR, insight))
.await?;
}
}
}
}
}
}
Ok(Vec::new())
}
fn default_codec(&self) -> &str {
"json"
}
#[allow(clippy::too_many_arguments)]
async fn init(
&mut self,
_sink_uid: u64,
sink_url: &TremorUrl,
_codec: &dyn Codec,
_codec_map: &HashMap<String, Box<dyn Codec>>,
processors: Processors<'_>,
_is_linked: bool,
reply_channel: Sender<Reply>,
) -> Result<()> {
self.handle_channel().await?;
self.postprocessors = make_postprocessors(processors.post)?;
self.reply_channel = reply_channel;
self.sink_url = sink_url.clone();
Ok(())
}
async fn | (&mut self, _signal: Event) -> ResultVec {
//self.drain_fatal_errors()?;
Ok(Vec::new())
}
fn is_active(&self) -> bool {
true
}
fn auto_ack(&self) -> bool {
false
}
async fn terminate(&mut self) {
if let Some(channel) = self.channel.as_ref() {
if let Err(e) = channel.close(0, "terminating sink").await {
error!("[Sink] Failed to close channel: {}", e);
}
if let Err(e) = channel.wait_for_confirms().await {
error!("[Sink] Failed to close channel: {}", e);
};
}
/*if self.channel.in_flight_count() > 0 {
// wait a second in order to flush messages.
let wait_secs = 1;
info!(
"[Sink::{}] Flushing messages. Waiting for {} seconds.",
wait_secs, &self.sink_url
);
self.channel.flush(Duration::from_secs(1));
}*/
info!("[Sink::{}] Terminating.", &self.sink_url);
}
}
| on_signal | identifier_name |
amqp.rs | // Copyright 2020-2021, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// #![cfg_attr(coverage, no_coverage)]
//! # AMQP Offramp
//!
//! The `amqp` offramp allows producing events to an amqp broker.
use crate::channel::{bounded, Receiver};
use crate::sink::prelude::*;
use halfbrown::HashMap;
use lapin::{
options::BasicPublishOptions, publisher_confirm::Confirmation, BasicProperties, Channel,
Connection, ConnectionProperties, PromiseChain,
};
use serde::Deserialize;
use std::{fmt, time::Instant};
use tremor_common::url::TremorUrl;
#[derive(Deserialize, Debug, Clone)]
pub(crate) struct Config {
pub(crate) amqp_addr: String,
#[serde(default = "Default::default")]
routing_key: String,
#[serde(default = "Default::default")]
exchange: String,
publish_options: BasicPublishOptions,
// headers to use for the messages
#[serde(default = "Default::default")]
pub(crate) headers: HashMap<String, Vec<String>>,
}
impl Config {
async fn channel(&self) -> PromiseChain<Channel> {
match Connection::connect(&self.amqp_addr, ConnectionProperties::default()).await {
Ok(connection) => connection.create_channel(),
Err(error) => PromiseChain::new_with_data(Err(error)), |
impl ConfigImpl for Config {}
/// Amqp offramp connector
pub(crate) struct Amqp {
sink_url: TremorUrl,
config: Config,
postprocessors: Postprocessors,
reply_channel: Sender<sink::Reply>,
channel: Option<Channel>,
error_rx: Receiver<()>,
error_tx: Sender<()>,
}
impl fmt::Debug for Amqp {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"[Sink::{}] RoutingKey: {}",
&self.sink_url, self.config.routing_key
)
}
}
pub(crate) struct Builder {}
impl offramp::Builder for Builder {
fn from_config(&self, config: &Option<OpConfig>) -> Result<Box<dyn Offramp>> {
if let Some(config) = config {
let config: Config = Config::new(config)?;
let (dummy_tx, _) = bounded(1);
let (error_tx, error_rx) = bounded(qsize());
Ok(SinkManager::new_box(Amqp {
sink_url: TremorUrl::from_offramp_id("amqp")?, // dummy
config,
postprocessors: vec![],
reply_channel: dummy_tx,
channel: None,
error_rx,
error_tx,
}))
} else {
Err("Amqp offramp requires a config".into())
}
}
}
impl Amqp {
async fn handle_channel(&mut self) -> Result<Option<&Channel>> {
while let Ok(()) = self.error_rx.try_recv() {
self.channel = None;
}
if self.channel.is_none() {
match self.config.channel().await.await {
Ok(channel) => self.channel = Some(channel),
Err(error) => return Err(error.into()),
}
}
return Ok(self.channel.as_ref());
}
}
#[async_trait::async_trait]
impl Sink for Amqp {
async fn on_event(
&mut self,
_input: &str,
codec: &mut dyn Codec,
_codec_map: &HashMap<String, Box<dyn Codec>>,
event: Event,
) -> ResultVec {
self.handle_channel().await?;
let ingest_ns = event.ingest_ns;
let processing_start = Instant::now();
/*
// evaluate here to avoid borrowing again while borrowed.
let config_reply = self.config.reply.as_deref();
let op_meta = &event.op_meta;
self.merged_meta.merge(op_meta.clone());
*/
let insight_event = event.insight_ack();
if let Some(channel) = &mut self.channel {
for (value, _) in event.value_meta_iter() {
let encoded = codec.encode(value)?;
let processed =
postprocess(self.postprocessors.as_mut_slice(), ingest_ns, encoded)?;
//let headers = meta.get("nats").and_then(|v| v.get_object("headers"));
for payload in processed {
/*
// prepare message reply
let message_reply = reply.or(config_reply);
*/
// prepare message headers
let properties = BasicProperties::default();
/*
let mut key_val: Vec<(&str, &str)> = Vec::with_capacity(
self.config.headers.len() + headers.map(HashMap::len).unwrap_or_default(),
);
for (key, val) in &self.config.headers {
for ele in val.iter() {
key_val.push((key.as_str(), ele.as_str()));
}
}
if let Some(headers) = headers {
for (key, val) in
headers.iter().filter_map(|(k, v)| Some((k, v.as_array()?)))
{
for ele in val.iter().filter_map(value_trait::ValueAccess::as_str) {
key_val.push((key, ele));
}
}
}
let message_headers = if key_val.is_empty() {
None
} else {
Some(Headers::from_iter(key_val))
};
*/
let publish_result = channel
.basic_publish(
self.config.exchange.as_str(),
self.config.routing_key.as_str(),
self.config.publish_options,
payload,
properties,
)
.await?
.await?;
match publish_result {
Confirmation::NotRequested | Confirmation::Ack(_) => {
if event.transactional {
let mut insight = insight_event.clone();
insight.cb = CbAction::Ack;
// we hopefully enver wait more then u64 ... if we do we got
// bigger problems
#[allow(clippy::cast_possible_truncation)]
let time = processing_start.elapsed().as_millis() as u64;
let mut m = Object::with_capacity(1);
m.insert("time".into(), time.into());
insight.data = (Value::null(), m).into();
self.reply_channel
.send(sink::Reply::Insight(insight.clone()))
.await?;
}
}
Confirmation::Nack(err) => {
if let Some(e) = err {
error!(
"[Sink::{}] failed to send message: {} {}",
&self.sink_url, e.reply_code, e.reply_text
);
} else {
error!(
"[Sink::{}] failed to send message: unknown error",
&self.sink_url
);
}
if self.error_tx.send(()).await.is_err() {
error!(
"[Sink::{}] Error notifying the system about amqp error",
&self.sink_url
);
}
if event.transactional {
let mut insight = insight_event.clone();
insight.cb = CbAction::Fail;
self.reply_channel
.send(sink::Reply::Response(ERR, insight))
.await?;
}
}
}
}
}
}
Ok(Vec::new())
}
fn default_codec(&self) -> &str {
"json"
}
#[allow(clippy::too_many_arguments)]
async fn init(
&mut self,
_sink_uid: u64,
sink_url: &TremorUrl,
_codec: &dyn Codec,
_codec_map: &HashMap<String, Box<dyn Codec>>,
processors: Processors<'_>,
_is_linked: bool,
reply_channel: Sender<Reply>,
) -> Result<()> {
self.handle_channel().await?;
self.postprocessors = make_postprocessors(processors.post)?;
self.reply_channel = reply_channel;
self.sink_url = sink_url.clone();
Ok(())
}
async fn on_signal(&mut self, _signal: Event) -> ResultVec {
//self.drain_fatal_errors()?;
Ok(Vec::new())
}
fn is_active(&self) -> bool {
true
}
fn auto_ack(&self) -> bool {
false
}
async fn terminate(&mut self) {
if let Some(channel) = self.channel.as_ref() {
if let Err(e) = channel.close(0, "terminating sink").await {
error!("[Sink] Failed to close channel: {}", e);
}
if let Err(e) = channel.wait_for_confirms().await {
error!("[Sink] Failed to close channel: {}", e);
};
}
/*if self.channel.in_flight_count() > 0 {
// wait a second in order to flush messages.
let wait_secs = 1;
info!(
"[Sink::{}] Flushing messages. Waiting for {} seconds.",
wait_secs, &self.sink_url
);
self.channel.flush(Duration::from_secs(1));
}*/
info!("[Sink::{}] Terminating.", &self.sink_url);
}
} | }
}
} | random_line_split |
amqp.rs | // Copyright 2020-2021, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// #![cfg_attr(coverage, no_coverage)]
//! # AMQP Offramp
//!
//! The `amqp` offramp allows producing events to an amqp broker.
use crate::channel::{bounded, Receiver};
use crate::sink::prelude::*;
use halfbrown::HashMap;
use lapin::{
options::BasicPublishOptions, publisher_confirm::Confirmation, BasicProperties, Channel,
Connection, ConnectionProperties, PromiseChain,
};
use serde::Deserialize;
use std::{fmt, time::Instant};
use tremor_common::url::TremorUrl;
#[derive(Deserialize, Debug, Clone)]
pub(crate) struct Config {
pub(crate) amqp_addr: String,
#[serde(default = "Default::default")]
routing_key: String,
#[serde(default = "Default::default")]
exchange: String,
publish_options: BasicPublishOptions,
// headers to use for the messages
#[serde(default = "Default::default")]
pub(crate) headers: HashMap<String, Vec<String>>,
}
impl Config {
async fn channel(&self) -> PromiseChain<Channel> {
match Connection::connect(&self.amqp_addr, ConnectionProperties::default()).await {
Ok(connection) => connection.create_channel(),
Err(error) => PromiseChain::new_with_data(Err(error)),
}
}
}
impl ConfigImpl for Config {}
/// Amqp offramp connector
pub(crate) struct Amqp {
sink_url: TremorUrl,
config: Config,
postprocessors: Postprocessors,
reply_channel: Sender<sink::Reply>,
channel: Option<Channel>,
error_rx: Receiver<()>,
error_tx: Sender<()>,
}
impl fmt::Debug for Amqp {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"[Sink::{}] RoutingKey: {}",
&self.sink_url, self.config.routing_key
)
}
}
pub(crate) struct Builder {}
impl offramp::Builder for Builder {
fn from_config(&self, config: &Option<OpConfig>) -> Result<Box<dyn Offramp>> {
if let Some(config) = config {
let config: Config = Config::new(config)?;
let (dummy_tx, _) = bounded(1);
let (error_tx, error_rx) = bounded(qsize());
Ok(SinkManager::new_box(Amqp {
sink_url: TremorUrl::from_offramp_id("amqp")?, // dummy
config,
postprocessors: vec![],
reply_channel: dummy_tx,
channel: None,
error_rx,
error_tx,
}))
} else {
Err("Amqp offramp requires a config".into())
}
}
}
impl Amqp {
async fn handle_channel(&mut self) -> Result<Option<&Channel>> |
}
#[async_trait::async_trait]
impl Sink for Amqp {
async fn on_event(
&mut self,
_input: &str,
codec: &mut dyn Codec,
_codec_map: &HashMap<String, Box<dyn Codec>>,
event: Event,
) -> ResultVec {
self.handle_channel().await?;
let ingest_ns = event.ingest_ns;
let processing_start = Instant::now();
/*
// evaluate here to avoid borrowing again while borrowed.
let config_reply = self.config.reply.as_deref();
let op_meta = &event.op_meta;
self.merged_meta.merge(op_meta.clone());
*/
let insight_event = event.insight_ack();
if let Some(channel) = &mut self.channel {
for (value, _) in event.value_meta_iter() {
let encoded = codec.encode(value)?;
let processed =
postprocess(self.postprocessors.as_mut_slice(), ingest_ns, encoded)?;
//let headers = meta.get("nats").and_then(|v| v.get_object("headers"));
for payload in processed {
/*
// prepare message reply
let message_reply = reply.or(config_reply);
*/
// prepare message headers
let properties = BasicProperties::default();
/*
let mut key_val: Vec<(&str, &str)> = Vec::with_capacity(
self.config.headers.len() + headers.map(HashMap::len).unwrap_or_default(),
);
for (key, val) in &self.config.headers {
for ele in val.iter() {
key_val.push((key.as_str(), ele.as_str()));
}
}
if let Some(headers) = headers {
for (key, val) in
headers.iter().filter_map(|(k, v)| Some((k, v.as_array()?)))
{
for ele in val.iter().filter_map(value_trait::ValueAccess::as_str) {
key_val.push((key, ele));
}
}
}
let message_headers = if key_val.is_empty() {
None
} else {
Some(Headers::from_iter(key_val))
};
*/
let publish_result = channel
.basic_publish(
self.config.exchange.as_str(),
self.config.routing_key.as_str(),
self.config.publish_options,
payload,
properties,
)
.await?
.await?;
match publish_result {
Confirmation::NotRequested | Confirmation::Ack(_) => {
if event.transactional {
let mut insight = insight_event.clone();
insight.cb = CbAction::Ack;
// we hopefully enver wait more then u64 ... if we do we got
// bigger problems
#[allow(clippy::cast_possible_truncation)]
let time = processing_start.elapsed().as_millis() as u64;
let mut m = Object::with_capacity(1);
m.insert("time".into(), time.into());
insight.data = (Value::null(), m).into();
self.reply_channel
.send(sink::Reply::Insight(insight.clone()))
.await?;
}
}
Confirmation::Nack(err) => {
if let Some(e) = err {
error!(
"[Sink::{}] failed to send message: {} {}",
&self.sink_url, e.reply_code, e.reply_text
);
} else {
error!(
"[Sink::{}] failed to send message: unknown error",
&self.sink_url
);
}
if self.error_tx.send(()).await.is_err() {
error!(
"[Sink::{}] Error notifying the system about amqp error",
&self.sink_url
);
}
if event.transactional {
let mut insight = insight_event.clone();
insight.cb = CbAction::Fail;
self.reply_channel
.send(sink::Reply::Response(ERR, insight))
.await?;
}
}
}
}
}
}
Ok(Vec::new())
}
fn default_codec(&self) -> &str {
"json"
}
#[allow(clippy::too_many_arguments)]
async fn init(
&mut self,
_sink_uid: u64,
sink_url: &TremorUrl,
_codec: &dyn Codec,
_codec_map: &HashMap<String, Box<dyn Codec>>,
processors: Processors<'_>,
_is_linked: bool,
reply_channel: Sender<Reply>,
) -> Result<()> {
self.handle_channel().await?;
self.postprocessors = make_postprocessors(processors.post)?;
self.reply_channel = reply_channel;
self.sink_url = sink_url.clone();
Ok(())
}
async fn on_signal(&mut self, _signal: Event) -> ResultVec {
//self.drain_fatal_errors()?;
Ok(Vec::new())
}
fn is_active(&self) -> bool {
true
}
fn auto_ack(&self) -> bool {
false
}
async fn terminate(&mut self) {
if let Some(channel) = self.channel.as_ref() {
if let Err(e) = channel.close(0, "terminating sink").await {
error!("[Sink] Failed to close channel: {}", e);
}
if let Err(e) = channel.wait_for_confirms().await {
error!("[Sink] Failed to close channel: {}", e);
};
}
/*if self.channel.in_flight_count() > 0 {
// wait a second in order to flush messages.
let wait_secs = 1;
info!(
"[Sink::{}] Flushing messages. Waiting for {} seconds.",
wait_secs, &self.sink_url
);
self.channel.flush(Duration::from_secs(1));
}*/
info!("[Sink::{}] Terminating.", &self.sink_url);
}
}
| {
while let Ok(()) = self.error_rx.try_recv() {
self.channel = None;
}
if self.channel.is_none() {
match self.config.channel().await.await {
Ok(channel) => self.channel = Some(channel),
Err(error) => return Err(error.into()),
}
}
return Ok(self.channel.as_ref());
} | identifier_body |
amqp.rs | // Copyright 2020-2021, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// #![cfg_attr(coverage, no_coverage)]
//! # AMQP Offramp
//!
//! The `amqp` offramp allows producing events to an amqp broker.
use crate::channel::{bounded, Receiver};
use crate::sink::prelude::*;
use halfbrown::HashMap;
use lapin::{
options::BasicPublishOptions, publisher_confirm::Confirmation, BasicProperties, Channel,
Connection, ConnectionProperties, PromiseChain,
};
use serde::Deserialize;
use std::{fmt, time::Instant};
use tremor_common::url::TremorUrl;
#[derive(Deserialize, Debug, Clone)]
pub(crate) struct Config {
pub(crate) amqp_addr: String,
#[serde(default = "Default::default")]
routing_key: String,
#[serde(default = "Default::default")]
exchange: String,
publish_options: BasicPublishOptions,
// headers to use for the messages
#[serde(default = "Default::default")]
pub(crate) headers: HashMap<String, Vec<String>>,
}
impl Config {
async fn channel(&self) -> PromiseChain<Channel> {
match Connection::connect(&self.amqp_addr, ConnectionProperties::default()).await {
Ok(connection) => connection.create_channel(),
Err(error) => PromiseChain::new_with_data(Err(error)),
}
}
}
impl ConfigImpl for Config {}
/// Amqp offramp connector
pub(crate) struct Amqp {
sink_url: TremorUrl,
config: Config,
postprocessors: Postprocessors,
reply_channel: Sender<sink::Reply>,
channel: Option<Channel>,
error_rx: Receiver<()>,
error_tx: Sender<()>,
}
impl fmt::Debug for Amqp {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"[Sink::{}] RoutingKey: {}",
&self.sink_url, self.config.routing_key
)
}
}
pub(crate) struct Builder {}
impl offramp::Builder for Builder {
fn from_config(&self, config: &Option<OpConfig>) -> Result<Box<dyn Offramp>> {
if let Some(config) = config {
let config: Config = Config::new(config)?;
let (dummy_tx, _) = bounded(1);
let (error_tx, error_rx) = bounded(qsize());
Ok(SinkManager::new_box(Amqp {
sink_url: TremorUrl::from_offramp_id("amqp")?, // dummy
config,
postprocessors: vec![],
reply_channel: dummy_tx,
channel: None,
error_rx,
error_tx,
}))
} else {
Err("Amqp offramp requires a config".into())
}
}
}
impl Amqp {
async fn handle_channel(&mut self) -> Result<Option<&Channel>> {
while let Ok(()) = self.error_rx.try_recv() {
self.channel = None;
}
if self.channel.is_none() {
match self.config.channel().await.await {
Ok(channel) => self.channel = Some(channel),
Err(error) => return Err(error.into()),
}
}
return Ok(self.channel.as_ref());
}
}
#[async_trait::async_trait]
impl Sink for Amqp {
async fn on_event(
&mut self,
_input: &str,
codec: &mut dyn Codec,
_codec_map: &HashMap<String, Box<dyn Codec>>,
event: Event,
) -> ResultVec {
self.handle_channel().await?;
let ingest_ns = event.ingest_ns;
let processing_start = Instant::now();
/*
// evaluate here to avoid borrowing again while borrowed.
let config_reply = self.config.reply.as_deref();
let op_meta = &event.op_meta;
self.merged_meta.merge(op_meta.clone());
*/
let insight_event = event.insight_ack();
if let Some(channel) = &mut self.channel {
for (value, _) in event.value_meta_iter() {
let encoded = codec.encode(value)?;
let processed =
postprocess(self.postprocessors.as_mut_slice(), ingest_ns, encoded)?;
//let headers = meta.get("nats").and_then(|v| v.get_object("headers"));
for payload in processed {
/*
// prepare message reply
let message_reply = reply.or(config_reply);
*/
// prepare message headers
let properties = BasicProperties::default();
/*
let mut key_val: Vec<(&str, &str)> = Vec::with_capacity(
self.config.headers.len() + headers.map(HashMap::len).unwrap_or_default(),
);
for (key, val) in &self.config.headers {
for ele in val.iter() {
key_val.push((key.as_str(), ele.as_str()));
}
}
if let Some(headers) = headers {
for (key, val) in
headers.iter().filter_map(|(k, v)| Some((k, v.as_array()?)))
{
for ele in val.iter().filter_map(value_trait::ValueAccess::as_str) {
key_val.push((key, ele));
}
}
}
let message_headers = if key_val.is_empty() {
None
} else {
Some(Headers::from_iter(key_val))
};
*/
let publish_result = channel
.basic_publish(
self.config.exchange.as_str(),
self.config.routing_key.as_str(),
self.config.publish_options,
payload,
properties,
)
.await?
.await?;
match publish_result {
Confirmation::NotRequested | Confirmation::Ack(_) => |
Confirmation::Nack(err) => {
if let Some(e) = err {
error!(
"[Sink::{}] failed to send message: {} {}",
&self.sink_url, e.reply_code, e.reply_text
);
} else {
error!(
"[Sink::{}] failed to send message: unknown error",
&self.sink_url
);
}
if self.error_tx.send(()).await.is_err() {
error!(
"[Sink::{}] Error notifying the system about amqp error",
&self.sink_url
);
}
if event.transactional {
let mut insight = insight_event.clone();
insight.cb = CbAction::Fail;
self.reply_channel
.send(sink::Reply::Response(ERR, insight))
.await?;
}
}
}
}
}
}
Ok(Vec::new())
}
fn default_codec(&self) -> &str {
"json"
}
#[allow(clippy::too_many_arguments)]
async fn init(
&mut self,
_sink_uid: u64,
sink_url: &TremorUrl,
_codec: &dyn Codec,
_codec_map: &HashMap<String, Box<dyn Codec>>,
processors: Processors<'_>,
_is_linked: bool,
reply_channel: Sender<Reply>,
) -> Result<()> {
self.handle_channel().await?;
self.postprocessors = make_postprocessors(processors.post)?;
self.reply_channel = reply_channel;
self.sink_url = sink_url.clone();
Ok(())
}
async fn on_signal(&mut self, _signal: Event) -> ResultVec {
//self.drain_fatal_errors()?;
Ok(Vec::new())
}
fn is_active(&self) -> bool {
true
}
fn auto_ack(&self) -> bool {
false
}
async fn terminate(&mut self) {
if let Some(channel) = self.channel.as_ref() {
if let Err(e) = channel.close(0, "terminating sink").await {
error!("[Sink] Failed to close channel: {}", e);
}
if let Err(e) = channel.wait_for_confirms().await {
error!("[Sink] Failed to close channel: {}", e);
};
}
/*if self.channel.in_flight_count() > 0 {
// wait a second in order to flush messages.
let wait_secs = 1;
info!(
"[Sink::{}] Flushing messages. Waiting for {} seconds.",
wait_secs, &self.sink_url
);
self.channel.flush(Duration::from_secs(1));
}*/
info!("[Sink::{}] Terminating.", &self.sink_url);
}
}
| {
if event.transactional {
let mut insight = insight_event.clone();
insight.cb = CbAction::Ack;
// we hopefully enver wait more then u64 ... if we do we got
// bigger problems
#[allow(clippy::cast_possible_truncation)]
let time = processing_start.elapsed().as_millis() as u64;
let mut m = Object::with_capacity(1);
m.insert("time".into(), time.into());
insight.data = (Value::null(), m).into();
self.reply_channel
.send(sink::Reply::Insight(insight.clone()))
.await?;
}
} | conditional_block |
mod.rs | // src/io/mod.rs -- input/output interfaces for Tectonic.
// Copyright 2016-2018 the Tectonic Project
// Licensed under the MIT License.
//! Tectonic’s pluggable I/O backend.
use flate2::read::GzDecoder;
use std::borrow::Cow;
use std::ffi::{OsStr, OsString};
use std::fs::File;
use std::io::{self, Cursor, Read, Seek, SeekFrom, Write};
use std::path::Path;
use std::str::FromStr;
use crate::ctry;
use crate::digest::{self, Digest, DigestData};
use crate::errors::{Error, ErrorKind, Result};
use crate::status::StatusBackend;
pub mod cached_itarbundle;
pub mod dirbundle;
pub mod filesystem;
pub mod format_cache;
pub mod memory;
pub mod setup;
pub mod stack;
pub mod stdstreams;
pub mod zipbundle;
pub trait InputFeatures: Read {
fn get_size(&mut self) -> Result<usize>;
fn try_seek(&mut self, pos: SeekFrom) -> Result<u64>;
}
/// What kind of source an input file ultimately came from. We keep track of
/// this in order to be able to emit Makefile-style dependencies for input
/// files. Right now, we only provide enough options to achieve this goal; we
/// could add more.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum InputOrigin {
/// This file lives on the filesystem and might change under us. (That is
/// it is not a cached bundle file.)
Filesystem,
/// This file was never used as an input.
NotInput,
/// This file is none of the above.
Other,
}
/// Input handles are basically Read objects with a few extras. We don't
/// require the standard io::Seek because we need to provide a dummy
/// implementation for GZip streams, which we wouldn't be allowed to do
/// because both the trait and the target struct are outside of our crate.
///
/// An important role for the InputHandle struct is computing a cryptographic
/// digest of the input file. The driver uses this information in order to
/// figure out if the TeX engine needs rerunning. TeX makes our life more
/// difficult, though, since it has somewhat funky file access patterns. LaTeX
/// file opens work by opening a file and immediately closing it, which tests
/// whether the file exists, and then by opening it again for real. Under the
/// hood, XeTeX reads a couple of bytes from each file upon open to sniff its
/// encoding. So we can't just stream data from `read()` calls into the SHA2
/// computer, since we end up seeking and reading redundant data.
///
/// The current system maintains some internal state that, so far, helps us Do
/// The Right Thing given all this. If there's a seek on the file, we give up
/// on our digest computation. But if there's a seek back to the file
/// beginning, we are open to the possibility of restarting the computation.
/// But if nothing is ever read from the file, we once again give up on the
/// computation. The `ExecutionState` code then has further pieces that track
/// access to nonexistent files, which we treat as being equivalent to an
/// existing empty file for these purposes.
pub struct InputHandle {
name: OsString,
inner: Box<dyn InputFeatures>,
/// Indicates that the file cannot be written to (provided by a read-only IoProvider) and
/// therefore it is useless to compute the digest.
read_only: bool,
digest: digest::DigestComputer,
origin: InputOrigin,
ever_read: bool,
did_unhandled_seek: bool,
}
impl InputHandle {
pub fn new<T: 'static + InputFeatures>(
name: &OsStr,
inner: T,
origin: InputOrigin,
) -> InputHandle {
InputHandle {
name: name.to_os_string(),
inner: Box::new(inner),
read_only: false,
digest: Default::default(),
origin,
ever_read: false,
did_unhandled_seek: false,
}
}
pub fn new_read_only<T: 'static + InputFeatures>(
name: &OsStr,
inner: T,
origin: InputOrigin,
) -> InputHandle {
InputHandle {
name: name.to_os_string(),
inner: Box::new(inner),
read_only: true,
digest: Default::default(),
origin,
ever_read: false,
did_unhandled_seek: false,
}
}
pub fn name(&self) -> &OsStr {
self.name.as_os_str()
}
pub fn origin(&self) -> InputOrigin {
self.origin
}
/// Consumes the object and returns the underlying readable handle that
/// it references.
pub fn into_inner(self) -> Box<dyn InputFeatures> {
self.inner
}
/// Consumes the object and returns the SHA256 sum of the content that was
/// read. No digest is returned if there was ever a seek on the input
/// stream, since in that case the results will not be reliable. We also
/// return None if the stream was never read, which is another common
/// TeX access pattern: files are opened, immediately closed, and then
/// opened again. Finally, no digest is returned if the file is marked read-only.
pub fn into_name_digest(self) -> (OsString, Option<DigestData>) {
if self.did_unhandled_seek || !self.ever_read || self.read_only {
(self.name, None)
} else {
(self.name, Some(DigestData::from(self.digest)))
}
}
pub fn getc(&mut self) -> Result<u8> {
let mut byte = [0u8; 1];
if self.read(&mut byte[..1])? == 0 {
// EOF
return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "EOF in getc").into());
}
Ok(byte[0])
}
}
impl Read for InputHandle {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.ever_read = true;
let n = self.inner.read(buf)?;
if !self.read_only {
self.digest.input(&buf[..n]);
}
Ok(n)
}
}
impl InputFeatures for InputHandle {
fn get_size(&mut self) -> Result<usize> {
self.inner.get_size()
}
fn try_seek(&mut self, pos: SeekFrom) -> Result<u64> {
match pos {
SeekFrom::Start(0) => {
// As described above, there is a common pattern in TeX file
// accesses: read a few bytes to sniff, then go back to the
// beginning. We should tidy up the I/O to just buffer instead
// of seeking, but in the meantime, we can handle this.
self.digest = Default::default();
self.ever_read = false;
}
SeekFrom::Current(0) => {
// Noop. This must *not* clear the ungetc buffer for our
// current PDF startxref/xref parsing code to work.
}
_ => {
self.did_unhandled_seek = true;
}
}
let offset = self.inner.try_seek(pos)?;
Ok(offset)
}
}
pub struct OutputHandle {
name: OsString,
inner: Box<dyn Write>,
digest: digest::DigestComputer,
}
impl OutputHandle {
pub fn new<T: 'static + Write>(name: &OsStr, inner: T) -> OutputHandle {
OutputHandle {
name: name.to_os_string(),
inner: Box::new(inner),
digest: digest::create(),
}
}
pub fn name(&self) -> &OsStr {
self.name.as_os_str()
}
/// Consumes the object and returns the underlying writable handle that
/// it references.
pub fn into_inner(self) -> Box<dyn Write> {
self.inner
}
/// Consumes the object and returns the SHA256 sum of the content that was
/// written.
pub fn into_name_digest(self) -> (OsString, DigestData) {
(self.name, DigestData::from(self.digest))
}
}
impl Write for OutputHandle {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let n = self.inner.write(buf)?;
self.digest.input(&buf[..n]);
Ok(n)
}
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()
}
}
// An Io provider is a source of handles. One wrinkle is that it's good to be
// able to distinguish between unavailability of a given name and error
// accessing it. We take file paths as OsStrs, although since we parse input
// files as Unicode it may not be possible to actually express zany
// non-Unicode Unix paths inside the engine.
#[derive(Debug)]
pub enum OpenResult<T> {
Ok(T),
NotAvailable,
Err(Error),
}
impl<T> OpenResult<T> {
pub fn unwrap(self) -> T {
match self {
OpenResult::Ok(t) => t,
_ => panic!("expected an open file"),
}
}
/// Returns true if this result is of the NotAvailable variant.
pub fn is_not_available(&self) -> bool {
if let OpenResult::NotAvailable = *self {
true
} else {
false
}
}
/// Convert this object into a plain Result, erroring if the item was not available.
pub fn must_exist(self) -> Result<T> {
match self {
OpenResult::Ok(t) => Ok(t),
OpenResult::Err(e) => Err(e),
OpenResult::NotAvailable => {
Err(io::Error::new(io::ErrorKind::NotFound, "not found").into())
}
}
}
}
/// A hack to allow casting of Bundles to IoProviders.
///
/// The code that sets up the I/O stack is handed a reference to a Bundle
/// trait object. For the actual I/O, it needs to convert this to an
/// IoProvider trait object. [According to
/// StackExchange](https://stackoverflow.com/a/28664881/3760486), the
/// following pattern is the least-bad way to achieve the necessary upcasting.
pub trait AsIoProviderMut {
/// Represent this value as an IoProvider trait object.
fn as_ioprovider_mut(&mut self) -> &mut dyn IoProvider;
}
impl<T: IoProvider> AsIoProviderMut for T {
fn as_ioprovider_mut(&mut self) -> &mut dyn IoProvider {
self
}
}
/// A trait for types that can read or write files needed by the TeX engine.
pub trait IoProvider: AsIoProviderMut {
fn output_open_name(&mut self, _name: &OsStr) -> OpenResult<OutputHandle> {
OpenResult::NotAvailable
}
fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> {
OpenResult::NotAvailable
}
fn input_open_name(
&mut self,
_name: &OsStr,
_status: &mut dyn StatusBackend,
) -> OpenResult<InputHandle> {
| /// Open the "primary" input file, which in the context of TeX is the main
/// input that it's given. When the build is being done using the
/// filesystem and the input is a file on the filesystem, this function
/// isn't necesssarily that important, but those conditions don't always
/// hold.
fn input_open_primary(&mut self, _status: &mut dyn StatusBackend) -> OpenResult<InputHandle> {
OpenResult::NotAvailable
}
/// Open a format file with the specified name. Format files have a
/// specialized entry point because IOProviders may wish to handle them
/// specially: namely, to munge the filename to one that includes the
/// current version of the Tectonic engine, since the format contents
/// depend sensitively on the engine internals.
fn input_open_format(
&mut self,
name: &OsStr,
status: &mut dyn StatusBackend,
) -> OpenResult<InputHandle> {
self.input_open_name(name, status)
}
/// Save an a format dump in some way that this provider may be able to
/// recover in the future. This awkward interface is needed for to write
/// formats with their special munged file names.
fn write_format(
&mut self,
_name: &str,
_data: &[u8],
_status: &mut dyn StatusBackend,
) -> Result<()> {
Err(ErrorKind::Msg("this I/O layer cannot save format files".to_owned()).into())
}
}
impl<P: IoProvider + ?Sized> IoProvider for Box<P> {
fn output_open_name(&mut self, name: &OsStr) -> OpenResult<OutputHandle> {
(**self).output_open_name(name)
}
fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> {
(**self).output_open_stdout()
}
fn input_open_name(
&mut self,
name: &OsStr,
status: &mut dyn StatusBackend,
) -> OpenResult<InputHandle> {
(**self).input_open_name(name, status)
}
fn input_open_primary(&mut self, status: &mut dyn StatusBackend) -> OpenResult<InputHandle> {
(**self).input_open_primary(status)
}
fn input_open_format(
&mut self,
name: &OsStr,
status: &mut dyn StatusBackend,
) -> OpenResult<InputHandle> {
(**self).input_open_format(name, status)
}
fn write_format(
&mut self,
name: &str,
data: &[u8],
status: &mut dyn StatusBackend,
) -> Result<()> {
(**self).write_format(name, data, status)
}
}
/// A special IoProvider that can make TeX format files.
///
/// A “bundle” is expected to contain a large number of TeX support files —
/// for instance, a compilation of a TeXLive distribution. In terms of the
/// software architecture, though, what is special about a bundle is that one
/// can generate one or more TeX format files from its contents without
/// reference to any other I/O resources.
pub trait Bundle: IoProvider {
/// Get a cryptographic digest summarizing this bundle’s contents.
///
/// The digest summarizes the exact contents of every file in the bundle.
/// It is computed from the sorted names and SHA256 digests of the
/// component files [as implemented in the script
/// builder/make-zipfile.py](https://github.com/tectonic-typesetting/tectonic-staging/blob/master/builder/make-zipfile.py#L138)
/// in the `tectonic-staging` module.
///
/// The default implementation gets the digest from a file name
/// `SHA256SUM`, which is expected to contain the digest in hex-encoded
/// format.
fn get_digest(&mut self, status: &mut dyn StatusBackend) -> Result<DigestData> {
let digest_text = match self.input_open_name(OsStr::new(digest::DIGEST_NAME), status) {
OpenResult::Ok(h) => {
let mut text = String::new();
h.take(64).read_to_string(&mut text)?;
text
}
OpenResult::NotAvailable => {
// Broken or un-cacheable backend.
return Err(ErrorKind::Msg(
"itar-format bundle does not provide needed SHA256SUM file".to_owned(),
)
.into());
}
OpenResult::Err(e) => {
return Err(e);
}
};
Ok(ctry!(DigestData::from_str(&digest_text); "corrupted SHA256 digest data"))
}
}
impl<B: Bundle + ?Sized> Bundle for Box<B> {
fn get_digest(&mut self, status: &mut dyn StatusBackend) -> Result<DigestData> {
(**self).get_digest(status)
}
}
// Some generically helpful InputFeatures impls
impl<R: Read> InputFeatures for GzDecoder<R> {
fn get_size(&mut self) -> Result<usize> {
Err(ErrorKind::NotSizeable.into())
}
fn try_seek(&mut self, _: SeekFrom) -> Result<u64> {
Err(ErrorKind::NotSeekable.into())
}
}
impl InputFeatures for Cursor<Vec<u8>> {
fn get_size(&mut self) -> Result<usize> {
Ok(self.get_ref().len())
}
fn try_seek(&mut self, pos: SeekFrom) -> Result<u64> {
Ok(self.seek(pos)?)
}
}
// Reexports
pub use self::filesystem::{FilesystemIo, FilesystemPrimaryInputIo};
pub use self::memory::MemoryIo;
pub use self::setup::{IoSetup, IoSetupBuilder};
pub use self::stack::IoStack;
pub use self::stdstreams::GenuineStdoutIo;
// Helpful.
pub fn try_open_file<P: AsRef<Path>>(path: P) -> OpenResult<File> {
use std::io::ErrorKind::NotFound;
match File::open(path) {
Ok(f) => OpenResult::Ok(f),
Err(e) => {
if e.kind() == NotFound {
OpenResult::NotAvailable
} else {
OpenResult::Err(e.into())
}
}
}
}
/// Normalize a TeX path in a system independent™ way by stripping any `.`, `..`,
/// or extra separators '/' so that it is of the form
///
/// ```text
/// path/to/my/file.txt
/// ../../path/to/parent/dir/file.txt
/// /absolute/path/to/file.txt
/// ```
///
/// Does not strip whitespace.
///
/// Returns `None` if the path refers to a parent of the root.
fn try_normalize_tex_path(path: &str) -> Option<String> {
use std::iter::repeat;
if path.is_empty() {
return Some("".into());
}
let mut r = Vec::new();
let mut parent_level = 0;
let mut has_root = false;
// TODO: We need to handle a prefix on Windows (i.e. "C:").
for (i, c) in path.split('/').enumerate() {
match c {
"" if i == 0 => {
has_root = true;
r.push("");
}
"" | "." => {}
".." => {
match r.pop() {
// about to pop the root
Some("") => return None,
None => parent_level += 1,
_ => {}
}
}
_ => r.push(c),
}
}
let r = repeat("..")
.take(parent_level)
.chain(r.into_iter())
// No `join` on `Iterator`.
.collect::<Vec<_>>()
.join("/");
if r.is_empty() {
if has_root {
Some("/".into())
} else {
Some(".".into())
}
} else {
Some(r)
}
}
/// Normalize a TeX path if possible, otherwise return the original path.
///
/// _TeX path_ is a path that obeys simplified semantics: Unix-like syntax (`/` for separators, etc.),
/// must be Unicode-able, no symlinks allowed such that `..` can be stripped lexically.
///
/// TODO: This function should operate on `&str` someday, but we need to transition the internals
/// away from `OsStr/OsString` before that can happen.
fn normalize_tex_path(path: &OsStr) -> Cow<OsStr> {
if let Some(t) = path
.to_str()
.and_then(try_normalize_tex_path)
.map(OsString::from)
{
Cow::Owned(t)
} else {
Cow::Borrowed(path)
}
}
// Helper for testing. FIXME: I want this to be conditionally compiled with
// #[cfg(test)] but things break if I do that.
pub mod testing {
use super::*;
use std::ffi::{OsStr, OsString};
use std::fs::File;
use std::path::{Path, PathBuf};
pub struct SingleInputFileIo {
name: OsString,
full_path: PathBuf,
}
impl SingleInputFileIo {
pub fn new(path: &Path) -> SingleInputFileIo {
let p = path.to_path_buf();
SingleInputFileIo {
name: p.file_name().unwrap().to_os_string(),
full_path: p,
}
}
}
impl IoProvider for SingleInputFileIo {
fn output_open_name(&mut self, _: &OsStr) -> OpenResult<OutputHandle> {
OpenResult::NotAvailable
}
fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> {
OpenResult::NotAvailable
}
fn input_open_name(
&mut self,
name: &OsStr,
_status: &mut dyn StatusBackend,
) -> OpenResult<InputHandle> {
if name == self.name {
OpenResult::Ok(InputHandle::new(
name,
File::open(&self.full_path).unwrap(),
InputOrigin::Filesystem,
))
} else {
OpenResult::NotAvailable
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_try_normalize_tex_path() {
// edge cases
assert_eq!(try_normalize_tex_path(""), Some("".into()));
assert_eq!(try_normalize_tex_path("/"), Some("/".into()));
assert_eq!(try_normalize_tex_path("//"), Some("/".into()));
assert_eq!(try_normalize_tex_path("."), Some(".".into()));
assert_eq!(try_normalize_tex_path("./"), Some(".".into()));
assert_eq!(try_normalize_tex_path(".."), Some("..".into()));
assert_eq!(try_normalize_tex_path("././/./"), Some(".".into()));
assert_eq!(try_normalize_tex_path("/././/."), Some("/".into()));
assert_eq!(
try_normalize_tex_path("my/path/file.txt"),
Some("my/path/file.txt".into())
);
// preserve spaces
assert_eq!(
try_normalize_tex_path(" my/pa th/file .txt "),
Some(" my/pa th/file .txt ".into())
);
assert_eq!(
try_normalize_tex_path("/my/path/file.txt"),
Some("/my/path/file.txt".into())
);
assert_eq!(
try_normalize_tex_path("./my///path/././file.txt"),
Some("my/path/file.txt".into())
);
assert_eq!(
try_normalize_tex_path("./../my/../../../file.txt"),
Some("../../../file.txt".into())
);
assert_eq!(
try_normalize_tex_path("././my//../path/../here/file.txt"),
Some("here/file.txt".into())
);
assert_eq!(
try_normalize_tex_path("./my/.././/path/../../here//file.txt"),
Some("../here/file.txt".into())
);
assert_eq!(try_normalize_tex_path("/my/../../file.txt"), None);
assert_eq!(
try_normalize_tex_path("/my/./.././path//../../file.txt"),
None
);
}
}
| OpenResult::NotAvailable
}
| identifier_body |
mod.rs | // src/io/mod.rs -- input/output interfaces for Tectonic.
// Copyright 2016-2018 the Tectonic Project
// Licensed under the MIT License.
//! Tectonic’s pluggable I/O backend.
use flate2::read::GzDecoder;
use std::borrow::Cow;
use std::ffi::{OsStr, OsString};
use std::fs::File;
use std::io::{self, Cursor, Read, Seek, SeekFrom, Write};
use std::path::Path;
use std::str::FromStr;
use crate::ctry;
use crate::digest::{self, Digest, DigestData};
use crate::errors::{Error, ErrorKind, Result};
use crate::status::StatusBackend;
pub mod cached_itarbundle;
pub mod dirbundle;
pub mod filesystem;
pub mod format_cache;
pub mod memory;
pub mod setup;
pub mod stack;
pub mod stdstreams;
pub mod zipbundle;
pub trait InputFeatures: Read {
fn get_size(&mut self) -> Result<usize>;
fn try_seek(&mut self, pos: SeekFrom) -> Result<u64>;
}
/// What kind of source an input file ultimately came from. We keep track of
/// this in order to be able to emit Makefile-style dependencies for input
/// files. Right now, we only provide enough options to achieve this goal; we
/// could add more.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum InputOrigin {
/// This file lives on the filesystem and might change under us. (That is
/// it is not a cached bundle file.)
Filesystem,
/// This file was never used as an input.
NotInput,
/// This file is none of the above.
Other,
}
/// Input handles are basically Read objects with a few extras. We don't
/// require the standard io::Seek because we need to provide a dummy
/// implementation for GZip streams, which we wouldn't be allowed to do
/// because both the trait and the target struct are outside of our crate.
///
/// An important role for the InputHandle struct is computing a cryptographic
/// digest of the input file. The driver uses this information in order to
/// figure out if the TeX engine needs rerunning. TeX makes our life more
/// difficult, though, since it has somewhat funky file access patterns. LaTeX
/// file opens work by opening a file and immediately closing it, which tests
/// whether the file exists, and then by opening it again for real. Under the
/// hood, XeTeX reads a couple of bytes from each file upon open to sniff its
/// encoding. So we can't just stream data from `read()` calls into the SHA2
/// computer, since we end up seeking and reading redundant data.
///
/// The current system maintains some internal state that, so far, helps us Do
/// The Right Thing given all this. If there's a seek on the file, we give up
/// on our digest computation. But if there's a seek back to the file
/// beginning, we are open to the possibility of restarting the computation.
/// But if nothing is ever read from the file, we once again give up on the
/// computation. The `ExecutionState` code then has further pieces that track
/// access to nonexistent files, which we treat as being equivalent to an
/// existing empty file for these purposes.
pub struct InputHandle {
name: OsString,
inner: Box<dyn InputFeatures>,
/// Indicates that the file cannot be written to (provided by a read-only IoProvider) and
/// therefore it is useless to compute the digest.
read_only: bool,
digest: digest::DigestComputer,
origin: InputOrigin,
ever_read: bool,
did_unhandled_seek: bool,
}
impl InputHandle {
pub fn new<T: 'static + InputFeatures>(
name: &OsStr,
inner: T,
origin: InputOrigin,
) -> InputHandle {
InputHandle {
name: name.to_os_string(),
inner: Box::new(inner),
read_only: false,
digest: Default::default(),
origin,
ever_read: false,
did_unhandled_seek: false,
}
}
pub fn new_read_only<T: 'static + InputFeatures>(
name: &OsStr,
inner: T,
origin: InputOrigin,
) -> InputHandle {
InputHandle {
name: name.to_os_string(),
inner: Box::new(inner),
read_only: true,
digest: Default::default(),
origin,
ever_read: false,
did_unhandled_seek: false,
}
}
pub fn name(&self) -> &OsStr {
self.name.as_os_str()
}
pub fn origin(&self) -> InputOrigin {
self.origin
}
/// Consumes the object and returns the underlying readable handle that
/// it references.
pub fn into_inner(self) -> Box<dyn InputFeatures> {
self.inner
}
/// Consumes the object and returns the SHA256 sum of the content that was
/// read. No digest is returned if there was ever a seek on the input
/// stream, since in that case the results will not be reliable. We also
/// return None if the stream was never read, which is another common
/// TeX access pattern: files are opened, immediately closed, and then
/// opened again. Finally, no digest is returned if the file is marked read-only.
pub fn into_name_digest(self) -> (OsString, Option<DigestData>) {
if self.did_unhandled_seek || !self.ever_read || self.read_only {
(self.name, None)
} else {
(self.name, Some(DigestData::from(self.digest)))
}
}
pub fn getc(&mut self) -> Result<u8> {
let mut byte = [0u8; 1];
if self.read(&mut byte[..1])? == 0 {
// EOF
return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "EOF in getc").into());
}
Ok(byte[0])
}
}
impl Read for InputHandle {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.ever_read = true;
let n = self.inner.read(buf)?;
if !self.read_only {
self.digest.input(&buf[..n]);
}
Ok(n)
}
}
impl InputFeatures for InputHandle {
fn get_size(&mut self) -> Result<usize> {
self.inner.get_size()
}
fn try_seek(&mut self, pos: SeekFrom) -> Result<u64> {
match pos {
SeekFrom::Start(0) => {
// As described above, there is a common pattern in TeX file
// accesses: read a few bytes to sniff, then go back to the
// beginning. We should tidy up the I/O to just buffer instead
// of seeking, but in the meantime, we can handle this.
self.digest = Default::default();
self.ever_read = false;
}
SeekFrom::Current(0) => {
// Noop. This must *not* clear the ungetc buffer for our
// current PDF startxref/xref parsing code to work.
}
_ => {
self.did_unhandled_seek = true;
}
}
let offset = self.inner.try_seek(pos)?;
Ok(offset)
}
}
pub struct OutputHandle {
name: OsString,
inner: Box<dyn Write>,
digest: digest::DigestComputer,
}
impl OutputHandle {
pub fn new<T: 'static + Write>(name: &OsStr, inner: T) -> OutputHandle {
OutputHandle {
name: name.to_os_string(),
inner: Box::new(inner),
digest: digest::create(),
}
}
pub fn name(&self) -> &OsStr {
self.name.as_os_str()
}
/// Consumes the object and returns the underlying writable handle that
/// it references.
pub fn into_inner(self) -> Box<dyn Write> {
self.inner
}
/// Consumes the object and returns the SHA256 sum of the content that was
/// written.
pub fn into_name_digest(self) -> (OsString, DigestData) {
(self.name, DigestData::from(self.digest))
}
}
impl Write for OutputHandle {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let n = self.inner.write(buf)?;
self.digest.input(&buf[..n]);
Ok(n)
}
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()
}
}
// An Io provider is a source of handles. One wrinkle is that it's good to be
// able to distinguish between unavailability of a given name and error
// accessing it. We take file paths as OsStrs, although since we parse input
// files as Unicode it may not be possible to actually express zany
// non-Unicode Unix paths inside the engine.
#[derive(Debug)]
pub enum OpenResult<T> {
Ok(T),
NotAvailable,
Err(Error),
}
impl<T> OpenResult<T> {
pub fn unwrap(self) -> T {
match self {
OpenResult::Ok(t) => t,
_ => panic!("expected an open file"),
}
}
/// Returns true if this result is of the NotAvailable variant.
pub fn is_not_available(&self) -> bool {
if let OpenResult::NotAvailable = *self {
true
} else {
false
}
}
/// Convert this object into a plain Result, erroring if the item was not available.
pub fn must_exist(self) -> Result<T> {
match self {
OpenResult::Ok(t) => Ok(t),
OpenResult::Err(e) => Err(e),
OpenResult::NotAvailable => {
Err(io::Error::new(io::ErrorKind::NotFound, "not found").into())
}
}
}
}
/// A hack to allow casting of Bundles to IoProviders.
///
/// The code that sets up the I/O stack is handed a reference to a Bundle
/// trait object. For the actual I/O, it needs to convert this to an
/// IoProvider trait object. [According to
/// StackExchange](https://stackoverflow.com/a/28664881/3760486), the
/// following pattern is the least-bad way to achieve the necessary upcasting.
pub trait AsIoProviderMut {
/// Represent this value as an IoProvider trait object.
fn as_ioprovider_mut(&mut self) -> &mut dyn IoProvider;
}
impl<T: IoProvider> AsIoProviderMut for T {
fn as_ioprovider_mut(&mut self) -> &mut dyn IoProvider {
self
}
}
/// A trait for types that can read or write files needed by the TeX engine.
pub trait IoProvider: AsIoProviderMut {
fn output_open_name(&mut self, _name: &OsStr) -> OpenResult<OutputHandle> {
OpenResult::NotAvailable
}
fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> {
OpenResult::NotAvailable
}
fn input_open_name(
&mut self,
_name: &OsStr,
_status: &mut dyn StatusBackend,
) -> OpenResult<InputHandle> {
OpenResult::NotAvailable
}
/// Open the "primary" input file, which in the context of TeX is the main
/// input that it's given. When the build is being done using the
/// filesystem and the input is a file on the filesystem, this function
/// isn't necesssarily that important, but those conditions don't always
/// hold.
fn input_open_primary(&mut self, _status: &mut dyn StatusBackend) -> OpenResult<InputHandle> {
OpenResult::NotAvailable
}
/// Open a format file with the specified name. Format files have a
/// specialized entry point because IOProviders may wish to handle them
/// specially: namely, to munge the filename to one that includes the
/// current version of the Tectonic engine, since the format contents
/// depend sensitively on the engine internals.
fn input_open_format(
&mut self,
name: &OsStr,
status: &mut dyn StatusBackend,
) -> OpenResult<InputHandle> {
self.input_open_name(name, status)
}
/// Save an a format dump in some way that this provider may be able to
/// recover in the future. This awkward interface is needed for to write
/// formats with their special munged file names.
fn write_format(
&mut self,
_name: &str,
_data: &[u8],
_status: &mut dyn StatusBackend,
) -> Result<()> {
Err(ErrorKind::Msg("this I/O layer cannot save format files".to_owned()).into())
}
}
impl<P: IoProvider + ?Sized> IoProvider for Box<P> {
fn output_open_name(&mut self, name: &OsStr) -> OpenResult<OutputHandle> {
(**self).output_open_name(name)
}
fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> {
(**self).output_open_stdout()
}
fn input_open_name(
&mut self,
name: &OsStr,
status: &mut dyn StatusBackend,
) -> OpenResult<InputHandle> {
(**self).input_open_name(name, status)
}
fn input_open_primary(&mut self, status: &mut dyn StatusBackend) -> OpenResult<InputHandle> {
(**self).input_open_primary(status)
}
fn input_open_format(
&mut self,
name: &OsStr,
status: &mut dyn StatusBackend,
) -> OpenResult<InputHandle> {
(**self).input_open_format(name, status)
}
fn write_format(
&mut self,
name: &str,
data: &[u8],
status: &mut dyn StatusBackend,
) -> Result<()> {
(**self).write_format(name, data, status)
}
}
/// A special IoProvider that can make TeX format files.
///
/// A “bundle” is expected to contain a large number of TeX support files —
/// for instance, a compilation of a TeXLive distribution. In terms of the
/// software architecture, though, what is special about a bundle is that one
/// can generate one or more TeX format files from its contents without
/// reference to any other I/O resources.
pub trait Bundle: IoProvider {
/// Get a cryptographic digest summarizing this bundle’s contents.
///
/// The digest summarizes the exact contents of every file in the bundle.
/// It is computed from the sorted names and SHA256 digests of the
/// component files [as implemented in the script
/// builder/make-zipfile.py](https://github.com/tectonic-typesetting/tectonic-staging/blob/master/builder/make-zipfile.py#L138)
/// in the `tectonic-staging` module.
///
/// The default implementation gets the digest from a file name
/// `SHA256SUM`, which is expected to contain the digest in hex-encoded
/// format.
fn get_digest(&mut self, status: &mut dyn StatusBackend) -> Result<DigestData> {
let digest_text = match self.input_open_name(OsStr::new(digest::DIGEST_NAME), status) {
OpenResult::Ok(h) => {
let mut text = String::new();
h.take(64).read_to_string(&mut text)?;
text
}
OpenResult::NotAvailable => {
// Broken or un-cacheable backend.
return Err(ErrorKind::Msg(
"itar-format bundle does not provide needed SHA256SUM file".to_owned(),
)
.into());
}
OpenResult::Err(e) => {
return Err(e);
}
};
Ok(ctry!(DigestData::from_str(&digest_text); "corrupted SHA256 digest data"))
}
}
impl<B: Bundle + ?Sized> Bundle for Box<B> {
fn get_digest(&mut self, status: &mut dyn StatusBackend) -> Result<DigestData> {
(**self).get_digest(status)
}
}
// Some generically helpful InputFeatures impls
impl<R: Read> InputFeatures for GzDecoder<R> {
fn get_size(&mut self) -> Result<usize> {
Err(ErrorKind::NotSizeable.into())
}
fn try_seek(&mut self, _: SeekFrom) -> Result<u64> {
Err(ErrorKind::NotSeekable.into())
}
}
impl InputFeatures for Cursor<Vec<u8>> {
fn get_size(&mut self) -> Result<usize> {
Ok(self.get_ref().len())
}
fn try_seek(&mut self, pos: SeekFrom) -> Result<u64> {
Ok(self.seek(pos)?)
}
}
// Reexports
pub use self::filesystem::{FilesystemIo, FilesystemPrimaryInputIo};
pub use self::memory::MemoryIo;
pub use self::setup::{IoSetup, IoSetupBuilder};
pub use self::stack::IoStack;
pub use self::stdstreams::GenuineStdoutIo;
// Helpful.
pub fn try_open_file<P: AsRef<Path>>(path: P) -> OpenResult<File> {
use std::io::ErrorKind::NotFound;
match File::open(path) {
Ok(f) => OpenResult::Ok(f),
Err(e) => {
if e.kind() == NotFound {
OpenResult::NotAvailable
} else {
OpenResult::Err(e.into())
}
}
}
}
/// Normalize a TeX path in a system independent™ way by stripping any `.`, `..`,
/// or extra separators '/' so that it is of the form
///
/// ```text
/// path/to/my/file.txt
/// ../../path/to/parent/dir/file.txt
/// /absolute/path/to/file.txt
/// ```
///
/// Does not strip whitespace.
///
/// Returns `None` if the path refers to a parent of the root.
fn try_normalize_tex_path(path: &str) -> Option<String> {
    use std::iter::repeat;
    if path.is_empty() {
        return Some("".into());
    }
    let mut r = Vec::new();
    let mut parent_level = 0;
    let mut has_root = false;
    // TODO: We need to handle a prefix on Windows (i.e. "C:").
    for (i, c) in path.split('/').enumerate() {
        match c {
            // A leading empty segment means the path started with `/`.
            "" if i == 0 => {
                has_root = true;
                r.push("");
            }
            // Repeated separators and `.` segments are no-ops.
            "" | "." => {}
            ".." => {
                match r.pop() {
                    // about to pop the root: escaping it is an error
                    Some("") => return None,
                    // nothing left to pop on a relative path: remember a leading `..`
                    None => parent_level += 1,
                    // otherwise the popped segment and the `..` cancel out
                    _ => {}
                }
            }
            _ => r.push(c),
        }
    }
    let r = repeat("..")
        .take(parent_level)
        .chain(r.into_iter())
        // No `join` on `Iterator`.
        .collect::<Vec<_>>()
        .join("/");
    if r.is_empty() {
        // Everything cancelled out: the root for absolute paths, `.` otherwise.
        if has_root {
            Some("/".into())
        } else {
            Some(".".into())
        }
    } else {
        Some(r)
    }
}
/// Normalize a TeX path if possible, otherwise return the original path.
///
/// _TeX path_ is a path that obeys simplified semantics: Unix-like syntax (`/` for separators, etc.),
/// must be Unicode-able, no symlinks allowed such that `..` can be stripped lexically.
///
/// TODO: This function should operate on `&str` someday, but we need to transition the internals
/// away from `OsStr/OsString` before that can happen.
fn normalize_tex_path(path: &OsStr) -> Cow<OsStr> {
if let Some(t) = path
.to_str()
.and_then(try_normalize_tex_path)
.map(OsString::from)
{
Cow::Owned(t)
} else {
Cow::Borrowed(path)
}
}
// Helper for testing. FIXME: I want this to be conditionally compiled with
// #[cfg(test)] but things break if I do that.
pub mod testing {
use super::*;
use std::ffi::{OsStr, OsString};
use std::fs::File;
use std::path::{Path, PathBuf};
pub struct SingleInputFileIo {
name: OsString,
full_path: PathBuf,
}
impl SingleInputFileIo {
pub fn new(path: &Path) -> SingleInputFileIo {
let p = path.to_path_buf();
SingleInputFileIo {
name: p.file_name().unwrap().to_os_string(),
full_path: p,
}
}
}
impl IoProvider for SingleInputFileIo {
fn output_open_name(&mut self, _: &OsStr) -> OpenResult<OutputHandle> {
OpenResult::NotAvailable
}
fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> {
OpenResult::NotAvailable
}
fn input_open_name(
&mut self,
name: &OsStr,
_status: &mut dyn StatusBackend,
) -> OpenResult<InputHandle> {
if name == self.name {
OpenResult::Ok(InputHandle::new(
name,
File::open(&self.full_path).unwrap(),
InputOrigin::Filesystem,
))
} else {
OpenResult::NotAvailable
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_try_normalize_tex_path() {
// edge cases
assert_eq!(try_normalize_tex_path(""), Some("".into()));
assert_eq!(try_normalize_tex_path("/"), Some("/".into()));
assert_eq!(try_normalize_tex_path("//"), Some("/".into()));
assert_eq!(try_normalize_tex_path("."), Some(".".into()));
assert_eq!(try_normalize_tex_path("./"), Some(".".into()));
assert_eq!(try_normalize_tex_path(".."), Some("..".into()));
assert_eq!(try_normalize_tex_path("././/./"), Some(".".into()));
assert_eq!(try_normalize_tex_path("/././/."), Some("/".into()));
assert_eq!(
try_normalize_tex_path("my/path/file.txt"),
Some("my/path/file.txt".into())
);
// preserve spaces
assert_eq!(
try_normalize_tex_path(" my/pa th/file .txt "),
Some(" my/pa th/file .txt ".into())
);
assert_eq!(
try_normalize_tex_path("/my/path/file.txt"),
Some("/my/path/file.txt".into())
);
assert_eq!(
try_normalize_tex_path("./my///path/././file.txt"),
Some("my/path/file.txt".into())
);
assert_eq!(
try_normalize_tex_path("./../my/../../../file.txt"),
Some("../../../file.txt".into())
);
assert_eq!(
try_normalize_tex_path("././my//../path/../here/file.txt"),
Some("here/file.txt".into())
);
assert_eq!(
try_normalize_tex_path("./my/.././/path/../../here//file.txt"),
Some("../here/file.txt".into())
);
assert_eq!(try_normalize_tex_path("/my/../../file.txt"), None);
assert_eq!(
try_normalize_tex_path("/my/./.././path//../../file.txt"),
None
);
}
} | _ => {}
}
} | random_line_split |
mod.rs | // src/io/mod.rs -- input/output interfaces for Tectonic.
// Copyright 2016-2018 the Tectonic Project
// Licensed under the MIT License.
//! Tectonic’s pluggable I/O backend.
use flate2::read::GzDecoder;
use std::borrow::Cow;
use std::ffi::{OsStr, OsString};
use std::fs::File;
use std::io::{self, Cursor, Read, Seek, SeekFrom, Write};
use std::path::Path;
use std::str::FromStr;
use crate::ctry;
use crate::digest::{self, Digest, DigestData};
use crate::errors::{Error, ErrorKind, Result};
use crate::status::StatusBackend;
pub mod cached_itarbundle;
pub mod dirbundle;
pub mod filesystem;
pub mod format_cache;
pub mod memory;
pub mod setup;
pub mod stack;
pub mod stdstreams;
pub mod zipbundle;
/// Capabilities Tectonic needs from its input streams beyond plain `Read`:
/// querying total size and attempting a seek. Both are fallible because some
/// backends (e.g. gzip streams) cannot support them.
pub trait InputFeatures: Read {
    /// Total size of the stream in bytes, if the backend can determine it.
    fn get_size(&mut self) -> Result<usize>;
    /// Attempt to seek; backends that cannot seek return an error instead.
    fn try_seek(&mut self, pos: SeekFrom) -> Result<u64>;
}
/// What kind of source an input file ultimately came from. We keep track of
/// this in order to be able to emit Makefile-style dependencies for input
/// files. Right now, we only provide enough options to achieve this goal; we
/// could add more.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum InputOrigin {
    /// This file lives on the filesystem and might change under us. (That is
    /// it is not a cached bundle file.)
    Filesystem,
    /// This file was never used as an input.
    NotInput,
    /// This file is none of the above.
    /// NOTE(review): presumably bundle or in-memory sources — confirm against
    /// the call sites that construct handles with this origin.
    Other,
}
/// Input handles are basically Read objects with a few extras. We don't
/// require the standard io::Seek because we need to provide a dummy
/// implementation for GZip streams, which we wouldn't be allowed to do
/// because both the trait and the target struct are outside of our crate.
///
/// An important role for the InputHandle struct is computing a cryptographic
/// digest of the input file. The driver uses this information in order to
/// figure out if the TeX engine needs rerunning. TeX makes our life more
/// difficult, though, since it has somewhat funky file access patterns. LaTeX
/// file opens work by opening a file and immediately closing it, which tests
/// whether the file exists, and then by opening it again for real. Under the
/// hood, XeTeX reads a couple of bytes from each file upon open to sniff its
/// encoding. So we can't just stream data from `read()` calls into the SHA2
/// computer, since we end up seeking and reading redundant data.
///
/// The current system maintains some internal state that, so far, helps us Do
/// The Right Thing given all this. If there's a seek on the file, we give up
/// on our digest computation. But if there's a seek back to the file
/// beginning, we are open to the possibility of restarting the computation.
/// But if nothing is ever read from the file, we once again give up on the
/// computation. The `ExecutionState` code then has further pieces that track
/// access to nonexistent files, which we treat as being equivalent to an
/// existing empty file for these purposes.
pub struct InputHandle {
    // The name under which the file was opened (not necessarily a full path).
    name: OsString,
    inner: Box<dyn InputFeatures>,
    /// Indicates that the file cannot be written to (provided by a read-only IoProvider) and
    /// therefore it is useless to compute the digest.
    read_only: bool,
    // Running SHA256 of all bytes read so far (see module-level discussion above).
    digest: digest::DigestComputer,
    origin: InputOrigin,
    // True once any read() has occurred; files opened and closed untouched
    // report no digest.
    ever_read: bool,
    // Set when a seek other than rewind-to-start or a no-op occurs; this
    // invalidates the digest.
    did_unhandled_seek: bool,
}
impl InputHandle {
pub fn new<T: 'static + InputFeatures>(
name: &OsStr,
inner: T,
origin: InputOrigin,
) -> InputHandle {
InputHandle {
name: name.to_os_string(),
inner: Box::new(inner),
read_only: false,
digest: Default::default(),
origin,
ever_read: false,
did_unhandled_seek: false,
}
}
pub fn new_read_only<T: 'static + InputFeatures>(
name: &OsStr,
inner: T,
origin: InputOrigin,
) -> InputHandle {
InputHandle {
name: name.to_os_string(),
inner: Box::new(inner),
read_only: true,
digest: Default::default(),
origin,
ever_read: false,
did_unhandled_seek: false,
}
}
pub fn name(&self) -> &OsStr {
self.name.as_os_str()
}
pub fn origin(&self) -> InputOrigin {
self.origin
}
/// Consumes the object and returns the underlying readable handle that
/// it references.
pub fn into_inner(self) -> Box<dyn InputFeatures> {
self.inner
}
/// Consumes the object and returns the SHA256 sum of the content that was
/// read. No digest is returned if there was ever a seek on the input
/// stream, since in that case the results will not be reliable. We also
/// return None if the stream was never read, which is another common
/// TeX access pattern: files are opened, immediately closed, and then
/// opened again. Finally, no digest is returned if the file is marked read-only.
pub fn into_name_digest(self) -> (OsString, Option<DigestData>) {
if self.did_unhandled_seek || !self.ever_read || self.read_only {
(self.name, None)
} else {
(self.name, Some(DigestData::from(self.digest)))
}
}
pub fn getc(&mut self) -> Result<u8> {
let mut byte = [0u8; 1];
if self.read(&mut byte[..1])? == 0 {
// EOF
return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "EOF in getc").into());
}
Ok(byte[0])
}
}
impl Read for InputHandle {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        // Any read() marks the handle as used, making it eligible for digest
        // reporting in into_name_digest.
        self.ever_read = true;
        let n = self.inner.read(buf)?;
        if !self.read_only {
            // Fold only the bytes actually read into the running digest.
            self.digest.input(&buf[..n]);
        }
        Ok(n)
    }
}
impl InputFeatures for InputHandle {
    /// Delegate size queries to the wrapped stream.
    fn get_size(&mut self) -> Result<usize> {
        self.inner.get_size()
    }
    /// Seek while maintaining the digest-tracking state machine: a rewind to
    /// the start restarts the digest, a zero-offset relative seek is a no-op,
    /// and any other seek invalidates the digest permanently.
    fn try_seek(&mut self, pos: SeekFrom) -> Result<u64> {
        match pos {
            SeekFrom::Start(0) => {
                // As described above, there is a common pattern in TeX file
                // accesses: read a few bytes to sniff, then go back to the
                // beginning. We should tidy up the I/O to just buffer instead
                // of seeking, but in the meantime, we can handle this.
                self.digest = Default::default();
                self.ever_read = false;
            }
            SeekFrom::Current(0) => {
                // Noop. This must *not* clear the ungetc buffer for our
                // current PDF startxref/xref parsing code to work.
            }
            _ => {
                // Digest can no longer reflect a linear read of the stream.
                self.did_unhandled_seek = true;
            }
        }
        let offset = self.inner.try_seek(pos)?;
        Ok(offset)
    }
}
/// A writable handle that computes a digest of everything written through it.
pub struct OutputHandle {
    // The name under which this output was opened.
    name: OsString,
    inner: Box<dyn Write>,
    // Running digest of all bytes the inner writer accepted.
    digest: digest::DigestComputer,
}
impl OutputHandle {
    /// Wrap `inner` in a digest-tracking output handle registered under `name`.
    pub fn new<T: 'static + Write>(name: &OsStr, inner: T) -> OutputHandle {
        let name = name.to_os_string();
        OutputHandle {
            name,
            digest: digest::create(),
            inner: Box::new(inner),
        }
    }
    /// The name under which this output was opened.
    pub fn name(&self) -> &OsStr {
        self.name.as_os_str()
    }
    /// Consumes the object and returns the underlying writable handle that
    /// it references.
    pub fn into_inner(self) -> Box<dyn Write> {
        self.inner
    }
    /// Consumes the object and returns the SHA256 sum of the content that was
    /// written.
    pub fn into_name_digest(self) -> (OsString, DigestData) {
        let OutputHandle { name, digest, .. } = self;
        (name, DigestData::from(digest))
    }
}
impl Write for OutputHandle {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let n = self.inner.write(buf)?;
        // Only the n bytes the inner writer actually accepted enter the digest,
        // keeping the digest consistent with what really landed in the output.
        self.digest.input(&buf[..n]);
        Ok(n)
    }
    fn flush(&mut self) -> io::Result<()> {
        self.inner.flush()
    }
}
// An Io provider is a source of handles. One wrinkle is that it's good to be
// able to distinguish between unavailability of a given name and error
// accessing it. We take file paths as OsStrs, although since we parse input
// files as Unicode it may not be possible to actually express zany
// non-Unicode Unix paths inside the engine.
#[derive(Debug)]
pub enum OpenResult<T> {
    /// The item was opened successfully.
    Ok(T),
    /// This backend does not provide the item; the caller may try another.
    NotAvailable,
    /// An error occurred while trying to access the item.
    Err(Error),
}
impl<T> OpenResult<T> {
    /// Extract the opened item, panicking on any other variant.
    pub fn unwrap(self) -> T {
        if let OpenResult::Ok(t) = self {
            t
        } else {
            panic!("expected an open file")
        }
    }
    /// Returns true if this result is of the NotAvailable variant.
    pub fn is_not_available(&self) -> bool {
        match *self {
            OpenResult::NotAvailable => true,
            _ => false,
        }
    }
    /// Convert this object into a plain Result, erroring if the item was not available.
    pub fn must_exist(self) -> Result<T> {
        match self {
            OpenResult::Ok(t) => Ok(t),
            OpenResult::Err(e) => Err(e),
            OpenResult::NotAvailable => {
                Err(io::Error::new(io::ErrorKind::NotFound, "not found").into())
            }
        }
    }
}
/// A hack to allow casting of Bundles to IoProviders.
///
/// The code that sets up the I/O stack is handed a reference to a Bundle
/// trait object. For the actual I/O, it needs to convert this to an
/// IoProvider trait object. [According to
/// StackExchange](https://stackoverflow.com/a/28664881/3760486), the
/// following pattern is the least-bad way to achieve the necessary upcasting.
pub trait AsIoProviderMut {
    /// Represent this value as an IoProvider trait object.
    fn as_ioprovider_mut(&mut self) -> &mut dyn IoProvider;
}
// Blanket impl: every concrete IoProvider can trivially produce the trait
// object, so implementors get the upcast for free.
impl<T: IoProvider> AsIoProviderMut for T {
    fn as_ioprovider_mut(&mut self) -> &mut dyn IoProvider {
        self
    }
}
/// A trait for types that can read or write files needed by the TeX engine.
pub trait IoProvider: AsIoProviderMut {
    /// Open the named file for writing. The default declines: this backend
    /// provides no outputs.
    fn output_open_name(&mut self, _name: &OsStr) -> OpenResult<OutputHandle> {
        OpenResult::NotAvailable
    }
    /// Open standard output for writing. The default declines.
    fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> {
        OpenResult::NotAvailable
    }
    /// Open the named file for reading. The default declines: this backend
    /// provides no inputs.
    fn input_open_name(
        &mut self,
        _name: &OsStr,
        _status: &mut dyn StatusBackend,
    ) -> OpenResult<InputHandle> {
        OpenResult::NotAvailable
    }
    /// Open the "primary" input file, which in the context of TeX is the main
    /// input that it's given. When the build is being done using the
    /// filesystem and the input is a file on the filesystem, this function
    /// isn't necessarily that important, but those conditions don't always
    /// hold.
    fn input_open_primary(&mut self, _status: &mut dyn StatusBackend) -> OpenResult<InputHandle> {
        OpenResult::NotAvailable
    }
    /// Open a format file with the specified name. Format files have a
    /// specialized entry point because IOProviders may wish to handle them
    /// specially: namely, to munge the filename to one that includes the
    /// current version of the Tectonic engine, since the format contents
    /// depend sensitively on the engine internals.
    fn input_open_format(
        &mut self,
        name: &OsStr,
        status: &mut dyn StatusBackend,
    ) -> OpenResult<InputHandle> {
        self.input_open_name(name, status)
    }
    /// Save a format dump in some way that this provider may be able to
    /// recover in the future. This awkward interface is needed to write
    /// formats with their special munged file names.
    fn write_format(
        &mut self,
        _name: &str,
        _data: &[u8],
        _status: &mut dyn StatusBackend,
    ) -> Result<()> {
        Err(ErrorKind::Msg("this I/O layer cannot save format files".to_owned()).into())
    }
}
// Forward the full IoProvider API through a Box so boxed providers (including
// boxed trait objects) can be used wherever an IoProvider is expected.
impl<P: IoProvider + ?Sized> IoProvider for Box<P> {
    fn output_open_name(&mut self, name: &OsStr) -> OpenResult<OutputHandle> {
        (**self).output_open_name(name)
    }
    fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> {
        (**self).output_open_stdout()
    }
    fn input_open_name(
        &mut self,
        name: &OsStr,
        status: &mut dyn StatusBackend,
    ) -> OpenResult<InputHandle> {
        (**self).input_open_name(name, status)
    }
    fn input_open_primary(&mut self, status: &mut dyn StatusBackend) -> OpenResult<InputHandle> {
        (**self).input_open_primary(status)
    }
    fn input_open_format(
        &mut self,
        name: &OsStr,
        status: &mut dyn StatusBackend,
    ) -> OpenResult<InputHandle> {
        (**self).input_open_format(name, status)
    }
    fn write_format(
        &mut self,
        name: &str,
        data: &[u8],
        status: &mut dyn StatusBackend,
    ) -> Result<()> {
        (**self).write_format(name, data, status)
    }
}
/// A special IoProvider that can make TeX format files.
///
/// A “bundle” is expected to contain a large number of TeX support files —
/// for instance, a compilation of a TeXLive distribution. In terms of the
/// software architecture, though, what is special about a bundle is that one
/// can generate one or more TeX format files from its contents without
/// reference to any other I/O resources.
pub trait Bundle: IoProvider {
    /// Get a cryptographic digest summarizing this bundle’s contents.
    ///
    /// The digest summarizes the exact contents of every file in the bundle.
    /// It is computed from the sorted names and SHA256 digests of the
    /// component files [as implemented in the script
    /// builder/make-zipfile.py](https://github.com/tectonic-typesetting/tectonic-staging/blob/master/builder/make-zipfile.py#L138)
    /// in the `tectonic-staging` module.
    ///
    /// The default implementation gets the digest from a file name
    /// `SHA256SUM`, which is expected to contain the digest in hex-encoded
    /// format.
    fn get_digest(&mut self, status: &mut dyn StatusBackend) -> Result<DigestData> {
        let digest_text = match self.input_open_name(OsStr::new(digest::DIGEST_NAME), status) {
            OpenResult::Ok(h) => {
                let mut text = String::new();
                // A hex-encoded SHA256 digest is exactly 64 characters, so
                // read at most that many bytes from the digest file.
                h.take(64).read_to_string(&mut text)?;
                text
            }
            OpenResult::NotAvailable => {
                // Broken or un-cacheable backend.
                return Err(ErrorKind::Msg(
                    "itar-format bundle does not provide needed SHA256SUM file".to_owned(),
                )
                .into());
            }
            OpenResult::Err(e) => {
                return Err(e);
            }
        };
        // Parse the hex text, attaching context if the data are corrupt.
        Ok(ctry!(DigestData::from_str(&digest_text); "corrupted SHA256 digest data"))
    }
}
// Forward digest retrieval through Box so boxed bundles remain Bundles.
impl<B: Bundle + ?Sized> Bundle for Box<B> {
    fn get_digest(&mut self, status: &mut dyn StatusBackend) -> Result<DigestData> {
        (**self).get_digest(status)
    }
}
// Some generically helpful InputFeatures impls
// Gzip streams can be read but support neither cheap sizing nor seeking, so
// both extra features report an error.
impl<R: Read> InputFeatures for GzDecoder<R> {
    fn get_size(&mut self) -> Result<usize> {
        Err(ErrorKind::NotSizeable.into())
    }
    fn try_seek(&mut self, _: SeekFrom) -> Result<u64> {
        Err(ErrorKind::NotSeekable.into())
    }
}
// In-memory buffers support both extra features directly.
impl InputFeatures for Cursor<Vec<u8>> {
    fn get_size(&mut self) -> Result<usize> {
        Ok(self.get_ref().len())
    }
    fn try_seek(&mut self, pos: SeekFrom) -> Result<u64> {
        // `?` converts the io::Error into the crate-wide Error type.
        Ok(self.seek(pos)?)
    }
}
// Reexports
pub use self::filesystem::{FilesystemIo, FilesystemPrimaryInputIo};
pub use self::memory::MemoryIo;
pub use self::setup::{IoSetup, IoSetupBuilder};
pub use self::stack::IoStack;
pub use self::stdstreams::GenuineStdoutIo;
// Helpful.
pub fn try_open_file<P: AsRef<Path>>(path: P) -> OpenResult<File> {
use std::io::ErrorKind::NotFound;
match File::open(path) {
Ok(f) => OpenResult::Ok(f),
Err(e) => {
if e.kind() == NotFound {
OpenResult::NotAvailable
} else {
|
}
}
/// Normalize a TeX path in a system independent™ way by stripping any `.`, `..`,
/// or extra separators '/' so that it is of the form
///
/// ```text
/// path/to/my/file.txt
/// ../../path/to/parent/dir/file.txt
/// /absolute/path/to/file.txt
/// ```
///
/// Does not strip whitespace.
///
/// Returns `None` if the path refers to a parent of the root.
fn try_normalize_tex_path(path: &str) -> Option<String> {
    if path.is_empty() {
        return Some(String::new());
    }
    // An absolute path starts with a separator, i.e. its first segment is empty.
    let is_absolute = path.starts_with('/');
    let mut kept: Vec<&str> = Vec::new();
    let mut leading_parents = 0usize;
    // TODO: We need to handle a prefix on Windows (i.e. "C:").
    for segment in path.split('/') {
        match segment {
            // Empty segments (doubled/leading separators) and `.` are no-ops.
            "" | "." => {}
            ".." => {
                if kept.pop().is_none() {
                    // Nothing to pop: either we would escape the root of an
                    // absolute path (illegal), or we accumulate a leading `..`
                    // on a relative path.
                    if is_absolute {
                        return None;
                    }
                    leading_parents += 1;
                }
            }
            other => kept.push(other),
        }
    }
    // Reassemble: any accumulated `..` segments first, then the kept segments.
    let mut parts = vec![".."; leading_parents];
    parts.extend(kept);
    let joined = parts.join("/");
    if joined.is_empty() {
        // Everything cancelled out: the root for absolute paths, `.` otherwise.
        Some(if is_absolute { "/" } else { "." }.to_owned())
    } else if is_absolute {
        Some(format!("/{}", joined))
    } else {
        Some(joined)
    }
}
/// Normalize a TeX path if possible, otherwise return the original path.
///
/// _TeX path_ is a path that obeys simplified semantics: Unix-like syntax (`/` for separators, etc.),
/// must be Unicode-able, no symlinks allowed such that `..` can be stripped lexically.
///
/// TODO: This function should operate on `&str` someday, but we need to transition the internals
/// away from `OsStr/OsString` before that can happen.
fn normalize_tex_path(path: &OsStr) -> Cow<OsStr> {
if let Some(t) = path
.to_str()
.and_then(try_normalize_tex_path)
.map(OsString::from)
{
Cow::Owned(t)
} else {
Cow::Borrowed(path)
}
}
// Helper for testing. FIXME: I want this to be conditionally compiled with
// #[cfg(test)] but things break if I do that.
pub mod testing {
use super::*;
use std::ffi::{OsStr, OsString};
use std::fs::File;
use std::path::{Path, PathBuf};
pub struct SingleInputFileIo {
name: OsString,
full_path: PathBuf,
}
impl SingleInputFileIo {
pub fn new(path: &Path) -> SingleInputFileIo {
let p = path.to_path_buf();
SingleInputFileIo {
name: p.file_name().unwrap().to_os_string(),
full_path: p,
}
}
}
impl IoProvider for SingleInputFileIo {
fn output_open_name(&mut self, _: &OsStr) -> OpenResult<OutputHandle> {
OpenResult::NotAvailable
}
fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> {
OpenResult::NotAvailable
}
fn input_open_name(
&mut self,
name: &OsStr,
_status: &mut dyn StatusBackend,
) -> OpenResult<InputHandle> {
if name == self.name {
OpenResult::Ok(InputHandle::new(
name,
File::open(&self.full_path).unwrap(),
InputOrigin::Filesystem,
))
} else {
OpenResult::NotAvailable
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_try_normalize_tex_path() {
// edge cases
assert_eq!(try_normalize_tex_path(""), Some("".into()));
assert_eq!(try_normalize_tex_path("/"), Some("/".into()));
assert_eq!(try_normalize_tex_path("//"), Some("/".into()));
assert_eq!(try_normalize_tex_path("."), Some(".".into()));
assert_eq!(try_normalize_tex_path("./"), Some(".".into()));
assert_eq!(try_normalize_tex_path(".."), Some("..".into()));
assert_eq!(try_normalize_tex_path("././/./"), Some(".".into()));
assert_eq!(try_normalize_tex_path("/././/."), Some("/".into()));
assert_eq!(
try_normalize_tex_path("my/path/file.txt"),
Some("my/path/file.txt".into())
);
// preserve spaces
assert_eq!(
try_normalize_tex_path(" my/pa th/file .txt "),
Some(" my/pa th/file .txt ".into())
);
assert_eq!(
try_normalize_tex_path("/my/path/file.txt"),
Some("/my/path/file.txt".into())
);
assert_eq!(
try_normalize_tex_path("./my///path/././file.txt"),
Some("my/path/file.txt".into())
);
assert_eq!(
try_normalize_tex_path("./../my/../../../file.txt"),
Some("../../../file.txt".into())
);
assert_eq!(
try_normalize_tex_path("././my//../path/../here/file.txt"),
Some("here/file.txt".into())
);
assert_eq!(
try_normalize_tex_path("./my/.././/path/../../here//file.txt"),
Some("../here/file.txt".into())
);
assert_eq!(try_normalize_tex_path("/my/../../file.txt"), None);
assert_eq!(
try_normalize_tex_path("/my/./.././path//../../file.txt"),
None
);
}
}
| OpenResult::Err(e.into())
}
} | conditional_block |
mod.rs | // src/io/mod.rs -- input/output interfaces for Tectonic.
// Copyright 2016-2018 the Tectonic Project
// Licensed under the MIT License.
//! Tectonic’s pluggable I/O backend.
use flate2::read::GzDecoder;
use std::borrow::Cow;
use std::ffi::{OsStr, OsString};
use std::fs::File;
use std::io::{self, Cursor, Read, Seek, SeekFrom, Write};
use std::path::Path;
use std::str::FromStr;
use crate::ctry;
use crate::digest::{self, Digest, DigestData};
use crate::errors::{Error, ErrorKind, Result};
use crate::status::StatusBackend;
pub mod cached_itarbundle;
pub mod dirbundle;
pub mod filesystem;
pub mod format_cache;
pub mod memory;
pub mod setup;
pub mod stack;
pub mod stdstreams;
pub mod zipbundle;
pub trait InputFeatures: Read {
fn get_size(&mut self) -> Result<usize>;
fn try_seek(&mut self, pos: SeekFrom) -> Result<u64>;
}
/// What kind of source an input file ultimately came from. We keep track of
/// this in order to be able to emit Makefile-style dependencies for input
/// files. Right now, we only provide enough options to achieve this goal; we
/// could add more.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum InputOrigin {
/// This file lives on the filesystem and might change under us. (That is
/// it is not a cached bundle file.)
Filesystem,
/// This file was never used as an input.
NotInput,
/// This file is none of the above.
Other,
}
/// Input handles are basically Read objects with a few extras. We don't
/// require the standard io::Seek because we need to provide a dummy
/// implementation for GZip streams, which we wouldn't be allowed to do
/// because both the trait and the target struct are outside of our crate.
///
/// An important role for the InputHandle struct is computing a cryptographic
/// digest of the input file. The driver uses this information in order to
/// figure out if the TeX engine needs rerunning. TeX makes our life more
/// difficult, though, since it has somewhat funky file access patterns. LaTeX
/// file opens work by opening a file and immediately closing it, which tests
/// whether the file exists, and then by opening it again for real. Under the
/// hood, XeTeX reads a couple of bytes from each file upon open to sniff its
/// encoding. So we can't just stream data from `read()` calls into the SHA2
/// computer, since we end up seeking and reading redundant data.
///
/// The current system maintains some internal state that, so far, helps us Do
/// The Right Thing given all this. If there's a seek on the file, we give up
/// on our digest computation. But if there's a seek back to the file
/// beginning, we are open to the possibility of restarting the computation.
/// But if nothing is ever read from the file, we once again give up on the
/// computation. The `ExecutionState` code then has further pieces that track
/// access to nonexistent files, which we treat as being equivalent to an
/// existing empty file for these purposes.
pub struct InputHandle {
name: OsString,
inner: Box<dyn InputFeatures>,
/// Indicates that the file cannot be written to (provided by a read-only IoProvider) and
/// therefore it is useless to compute the digest.
read_only: bool,
digest: digest::DigestComputer,
origin: InputOrigin,
ever_read: bool,
did_unhandled_seek: bool,
}
impl InputHandle {
pub fn new<T: 'static + InputFeatures>(
name: &OsStr,
inner: T,
origin: InputOrigin,
) -> InputHandle {
InputHandle {
name: name.to_os_string(),
inner: Box::new(inner),
read_only: false,
digest: Default::default(),
origin,
ever_read: false,
did_unhandled_seek: false,
}
}
pub fn new_read_only<T: 'static + InputFeatures>(
name: &OsStr,
inner: T,
origin: InputOrigin,
) -> InputHandle {
InputHandle {
name: name.to_os_string(),
inner: Box::new(inner),
read_only: true,
digest: Default::default(),
origin,
ever_read: false,
did_unhandled_seek: false,
}
}
pub fn name(&self) -> &OsStr {
self.name.as_os_str()
}
pub fn origin(&self) -> InputOrigin {
self.origin
}
/// Consumes the object and returns the underlying readable handle that
/// it references.
pub fn into_inner(self) -> Box<dyn InputFeatures> {
self.inner
}
/// Consumes the object and returns the SHA256 sum of the content that was
/// read. No digest is returned if there was ever a seek on the input
/// stream, since in that case the results will not be reliable. We also
/// return None if the stream was never read, which is another common
/// TeX access pattern: files are opened, immediately closed, and then
/// opened again. Finally, no digest is returned if the file is marked read-only.
pub fn into_name_digest(self) -> (OsString, Option<DigestData>) {
if self.did_unhandled_seek || !self.ever_read || self.read_only {
(self.name, None)
} else {
(self.name, Some(DigestData::from(self.digest)))
}
}
pub fn getc(&mut self) -> Result<u8> {
let mut byte = [0u8; 1];
if self.read(&mut byte[..1])? == 0 {
// EOF
return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "EOF in getc").into());
}
Ok(byte[0])
}
}
impl Read for InputHandle {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.ever_read = true;
let n = self.inner.read(buf)?;
if !self.read_only {
self.digest.input(&buf[..n]);
}
Ok(n)
}
}
impl InputFeatures for InputHandle {
fn get_size(&mut self) -> Result<usize> {
self.inner.get_size()
}
fn try_seek(&mut self, pos: SeekFrom) -> Result<u64> {
match pos {
SeekFrom::Start(0) => {
// As described above, there is a common pattern in TeX file
// accesses: read a few bytes to sniff, then go back to the
// beginning. We should tidy up the I/O to just buffer instead
// of seeking, but in the meantime, we can handle this.
self.digest = Default::default();
self.ever_read = false;
}
SeekFrom::Current(0) => {
// Noop. This must *not* clear the ungetc buffer for our
// current PDF startxref/xref parsing code to work.
}
_ => {
self.did_unhandled_seek = true;
}
}
let offset = self.inner.try_seek(pos)?;
Ok(offset)
}
}
pub struct OutputHandle {
name: OsString,
inner: Box<dyn Write>,
digest: digest::DigestComputer,
}
impl OutputHandle {
pub fn ne | : 'static + Write>(name: &OsStr, inner: T) -> OutputHandle {
OutputHandle {
name: name.to_os_string(),
inner: Box::new(inner),
digest: digest::create(),
}
}
pub fn name(&self) -> &OsStr {
self.name.as_os_str()
}
/// Consumes the object and returns the underlying writable handle that
/// it references.
pub fn into_inner(self) -> Box<dyn Write> {
self.inner
}
/// Consumes the object and returns the SHA256 sum of the content that was
/// written.
pub fn into_name_digest(self) -> (OsString, DigestData) {
(self.name, DigestData::from(self.digest))
}
}
impl Write for OutputHandle {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let n = self.inner.write(buf)?;
self.digest.input(&buf[..n]);
Ok(n)
}
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()
}
}
// An Io provider is a source of handles. One wrinkle is that it's good to be
// able to distinguish between unavailability of a given name and error
// accessing it. We take file paths as OsStrs, although since we parse input
// files as Unicode it may not be possible to actually express zany
// non-Unicode Unix paths inside the engine.
#[derive(Debug)]
pub enum OpenResult<T> {
Ok(T),
NotAvailable,
Err(Error),
}
impl<T> OpenResult<T> {
pub fn unwrap(self) -> T {
match self {
OpenResult::Ok(t) => t,
_ => panic!("expected an open file"),
}
}
/// Returns true if this result is of the NotAvailable variant.
pub fn is_not_available(&self) -> bool {
if let OpenResult::NotAvailable = *self {
true
} else {
false
}
}
/// Convert this object into a plain Result, erroring if the item was not available.
pub fn must_exist(self) -> Result<T> {
match self {
OpenResult::Ok(t) => Ok(t),
OpenResult::Err(e) => Err(e),
OpenResult::NotAvailable => {
Err(io::Error::new(io::ErrorKind::NotFound, "not found").into())
}
}
}
}
/// A hack to allow casting of Bundles to IoProviders.
///
/// The code that sets up the I/O stack is handed a reference to a Bundle
/// trait object. For the actual I/O, it needs to convert this to an
/// IoProvider trait object. [According to
/// StackExchange](https://stackoverflow.com/a/28664881/3760486), the
/// following pattern is the least-bad way to achieve the necessary upcasting.
pub trait AsIoProviderMut {
/// Represent this value as an IoProvider trait object.
fn as_ioprovider_mut(&mut self) -> &mut dyn IoProvider;
}
impl<T: IoProvider> AsIoProviderMut for T {
fn as_ioprovider_mut(&mut self) -> &mut dyn IoProvider {
self
}
}
/// A trait for types that can read or write files needed by the TeX engine.
pub trait IoProvider: AsIoProviderMut {
fn output_open_name(&mut self, _name: &OsStr) -> OpenResult<OutputHandle> {
OpenResult::NotAvailable
}
fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> {
OpenResult::NotAvailable
}
fn input_open_name(
&mut self,
_name: &OsStr,
_status: &mut dyn StatusBackend,
) -> OpenResult<InputHandle> {
OpenResult::NotAvailable
}
/// Open the "primary" input file, which in the context of TeX is the main
/// input that it's given. When the build is being done using the
/// filesystem and the input is a file on the filesystem, this function
/// isn't necesssarily that important, but those conditions don't always
/// hold.
fn input_open_primary(&mut self, _status: &mut dyn StatusBackend) -> OpenResult<InputHandle> {
OpenResult::NotAvailable
}
/// Open a format file with the specified name. Format files have a
/// specialized entry point because IOProviders may wish to handle them
/// specially: namely, to munge the filename to one that includes the
/// current version of the Tectonic engine, since the format contents
/// depend sensitively on the engine internals.
fn input_open_format(
&mut self,
name: &OsStr,
status: &mut dyn StatusBackend,
) -> OpenResult<InputHandle> {
self.input_open_name(name, status)
}
/// Save an a format dump in some way that this provider may be able to
/// recover in the future. This awkward interface is needed for to write
/// formats with their special munged file names.
fn write_format(
&mut self,
_name: &str,
_data: &[u8],
_status: &mut dyn StatusBackend,
) -> Result<()> {
Err(ErrorKind::Msg("this I/O layer cannot save format files".to_owned()).into())
}
}
impl<P: IoProvider + ?Sized> IoProvider for Box<P> {
fn output_open_name(&mut self, name: &OsStr) -> OpenResult<OutputHandle> {
(**self).output_open_name(name)
}
fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> {
(**self).output_open_stdout()
}
fn input_open_name(
&mut self,
name: &OsStr,
status: &mut dyn StatusBackend,
) -> OpenResult<InputHandle> {
(**self).input_open_name(name, status)
}
fn input_open_primary(&mut self, status: &mut dyn StatusBackend) -> OpenResult<InputHandle> {
(**self).input_open_primary(status)
}
fn input_open_format(
&mut self,
name: &OsStr,
status: &mut dyn StatusBackend,
) -> OpenResult<InputHandle> {
(**self).input_open_format(name, status)
}
fn write_format(
&mut self,
name: &str,
data: &[u8],
status: &mut dyn StatusBackend,
) -> Result<()> {
(**self).write_format(name, data, status)
}
}
/// A special IoProvider that can make TeX format files.
///
/// A “bundle” is expected to contain a large number of TeX support files —
/// for instance, a compilation of a TeXLive distribution. In terms of the
/// software architecture, though, what is special about a bundle is that one
/// can generate one or more TeX format files from its contents without
/// reference to any other I/O resources.
pub trait Bundle: IoProvider {
/// Get a cryptographic digest summarizing this bundle’s contents.
///
/// The digest summarizes the exact contents of every file in the bundle.
/// It is computed from the sorted names and SHA256 digests of the
/// component files [as implemented in the script
/// builder/make-zipfile.py](https://github.com/tectonic-typesetting/tectonic-staging/blob/master/builder/make-zipfile.py#L138)
/// in the `tectonic-staging` module.
///
/// The default implementation gets the digest from a file name
/// `SHA256SUM`, which is expected to contain the digest in hex-encoded
/// format.
fn get_digest(&mut self, status: &mut dyn StatusBackend) -> Result<DigestData> {
let digest_text = match self.input_open_name(OsStr::new(digest::DIGEST_NAME), status) {
OpenResult::Ok(h) => {
let mut text = String::new();
h.take(64).read_to_string(&mut text)?;
text
}
OpenResult::NotAvailable => {
// Broken or un-cacheable backend.
return Err(ErrorKind::Msg(
"itar-format bundle does not provide needed SHA256SUM file".to_owned(),
)
.into());
}
OpenResult::Err(e) => {
return Err(e);
}
};
Ok(ctry!(DigestData::from_str(&digest_text); "corrupted SHA256 digest data"))
}
}
impl<B: Bundle + ?Sized> Bundle for Box<B> {
fn get_digest(&mut self, status: &mut dyn StatusBackend) -> Result<DigestData> {
(**self).get_digest(status)
}
}
// Some generically helpful InputFeatures impls
impl<R: Read> InputFeatures for GzDecoder<R> {
fn get_size(&mut self) -> Result<usize> {
Err(ErrorKind::NotSizeable.into())
}
fn try_seek(&mut self, _: SeekFrom) -> Result<u64> {
Err(ErrorKind::NotSeekable.into())
}
}
impl InputFeatures for Cursor<Vec<u8>> {
fn get_size(&mut self) -> Result<usize> {
Ok(self.get_ref().len())
}
fn try_seek(&mut self, pos: SeekFrom) -> Result<u64> {
Ok(self.seek(pos)?)
}
}
// Reexports
pub use self::filesystem::{FilesystemIo, FilesystemPrimaryInputIo};
pub use self::memory::MemoryIo;
pub use self::setup::{IoSetup, IoSetupBuilder};
pub use self::stack::IoStack;
pub use self::stdstreams::GenuineStdoutIo;
// Helpful.
pub fn try_open_file<P: AsRef<Path>>(path: P) -> OpenResult<File> {
use std::io::ErrorKind::NotFound;
match File::open(path) {
Ok(f) => OpenResult::Ok(f),
Err(e) => {
if e.kind() == NotFound {
OpenResult::NotAvailable
} else {
OpenResult::Err(e.into())
}
}
}
}
/// Normalize a TeX path in a system independent™ way by stripping any `.`, `..`,
/// or extra separators '/' so that it is of the form
///
/// ```text
/// path/to/my/file.txt
/// ../../path/to/parent/dir/file.txt
/// /absolute/path/to/file.txt
/// ```
///
/// Does not strip whitespace.
///
/// Returns `None` if the path refers to a parent of the root.
fn try_normalize_tex_path(path: &str) -> Option<String> {
use std::iter::repeat;
if path.is_empty() {
return Some("".into());
}
let mut r = Vec::new();
let mut parent_level = 0;
let mut has_root = false;
// TODO: We need to handle a prefix on Windows (i.e. "C:").
for (i, c) in path.split('/').enumerate() {
match c {
"" if i == 0 => {
has_root = true;
r.push("");
}
"" | "." => {}
".." => {
match r.pop() {
// about to pop the root
Some("") => return None,
None => parent_level += 1,
_ => {}
}
}
_ => r.push(c),
}
}
let r = repeat("..")
.take(parent_level)
.chain(r.into_iter())
// No `join` on `Iterator`.
.collect::<Vec<_>>()
.join("/");
if r.is_empty() {
if has_root {
Some("/".into())
} else {
Some(".".into())
}
} else {
Some(r)
}
}
/// Normalize a TeX path if possible, otherwise return the original path.
///
/// _TeX path_ is a path that obeys simplified semantics: Unix-like syntax (`/` for separators, etc.),
/// must be Unicode-able, no symlinks allowed such that `..` can be stripped lexically.
///
/// TODO: This function should operate on `&str` someday, but we need to transition the internals
/// away from `OsStr/OsString` before that can happen.
fn normalize_tex_path(path: &OsStr) -> Cow<OsStr> {
if let Some(t) = path
.to_str()
.and_then(try_normalize_tex_path)
.map(OsString::from)
{
Cow::Owned(t)
} else {
Cow::Borrowed(path)
}
}
// Helper for testing. FIXME: I want this to be conditionally compiled with
// #[cfg(test)] but things break if I do that.
pub mod testing {
use super::*;
use std::ffi::{OsStr, OsString};
use std::fs::File;
use std::path::{Path, PathBuf};
pub struct SingleInputFileIo {
name: OsString,
full_path: PathBuf,
}
impl SingleInputFileIo {
pub fn new(path: &Path) -> SingleInputFileIo {
let p = path.to_path_buf();
SingleInputFileIo {
name: p.file_name().unwrap().to_os_string(),
full_path: p,
}
}
}
impl IoProvider for SingleInputFileIo {
fn output_open_name(&mut self, _: &OsStr) -> OpenResult<OutputHandle> {
OpenResult::NotAvailable
}
fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> {
OpenResult::NotAvailable
}
fn input_open_name(
&mut self,
name: &OsStr,
_status: &mut dyn StatusBackend,
) -> OpenResult<InputHandle> {
if name == self.name {
OpenResult::Ok(InputHandle::new(
name,
File::open(&self.full_path).unwrap(),
InputOrigin::Filesystem,
))
} else {
OpenResult::NotAvailable
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_try_normalize_tex_path() {
// edge cases
assert_eq!(try_normalize_tex_path(""), Some("".into()));
assert_eq!(try_normalize_tex_path("/"), Some("/".into()));
assert_eq!(try_normalize_tex_path("//"), Some("/".into()));
assert_eq!(try_normalize_tex_path("."), Some(".".into()));
assert_eq!(try_normalize_tex_path("./"), Some(".".into()));
assert_eq!(try_normalize_tex_path(".."), Some("..".into()));
assert_eq!(try_normalize_tex_path("././/./"), Some(".".into()));
assert_eq!(try_normalize_tex_path("/././/."), Some("/".into()));
assert_eq!(
try_normalize_tex_path("my/path/file.txt"),
Some("my/path/file.txt".into())
);
// preserve spaces
assert_eq!(
try_normalize_tex_path(" my/pa th/file .txt "),
Some(" my/pa th/file .txt ".into())
);
assert_eq!(
try_normalize_tex_path("/my/path/file.txt"),
Some("/my/path/file.txt".into())
);
assert_eq!(
try_normalize_tex_path("./my///path/././file.txt"),
Some("my/path/file.txt".into())
);
assert_eq!(
try_normalize_tex_path("./../my/../../../file.txt"),
Some("../../../file.txt".into())
);
assert_eq!(
try_normalize_tex_path("././my//../path/../here/file.txt"),
Some("here/file.txt".into())
);
assert_eq!(
try_normalize_tex_path("./my/.././/path/../../here//file.txt"),
Some("../here/file.txt".into())
);
assert_eq!(try_normalize_tex_path("/my/../../file.txt"), None);
assert_eq!(
try_normalize_tex_path("/my/./.././path//../../file.txt"),
None
);
}
}
| w<T | identifier_name |
backend_helper.go | /*
* Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package resources
import (
"bytes"
"encoding/json"
"errors"
"io"
"reflect"
"strconv"
"strings"
"gopkg.in/yaml.v2"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8string "k8s.io/utils/strings"
"huawei-csi-driver/cli/helper"
xuanwuv1 "huawei-csi-driver/client/apis/xuanwu/v1"
)
const (
ApiVersion = "v1"
XuanWuApiVersion = "xuanwu.huawei.io/v1"
KindSecret = "Secret"
KindConfigMap = "ConfigMap"
KindStorageBackendClaim = "StorageBackendClaim"
YamlSeparator = "---"
)
// BackendConfiguration backend config
type BackendConfiguration struct {
Name string `json:"name,omitempty" yaml:"name"`
NameSpace string `json:"namespace,omitempty" yaml:"namespace"`
Storage string `json:"storage,omitempty" yaml:"storage"`
VstoreName string `json:"vstoreName,omitempty" yaml:"vstoreName"`
AccountName string `json:"accountName,omitempty" yaml:"accountName"`
Urls []string `json:"urls,omitempty" yaml:"urls"`
Pools []string `json:"pools,omitempty" yaml:"pools"`
MetrovStorePairID string `json:"metrovStorePairID,omitempty" yaml:"metrovStorePairID"`
MetroBackend string `json:"metroBackend,omitempty" yaml:"metroBackend"`
SupportedTopologies []map[string]interface{} `json:"supportedTopologies,omitempty" yaml:"supportedTopologies"`
MaxClientThreads string `json:"maxClientThreads,omitempty" yaml:"maxClientThreads"`
Configured bool `json:"-" yaml:"configured"`
Provisioner string `json:"provisioner,omitempty" yaml:"provisioner"`
Parameters struct {
Protocol string `json:"protocol,omitempty" yaml:"protocol"`
ParentName string `json:"parentname" yaml:"parentname"`
Portals []string `json:"portals,omitempty" yaml:"portals"`
Alua []map[string][]map[string]interface{} `json:"ALUA,omitempty" yaml:"ALUA"`
} `json:"parameters,omitempty" yaml:"parameters"`
}
// BackendShowWide the content echoed by executing the oceanctl get backend -o wide
type BackendShowWide struct {
Namespace string `show:"NAMESPACE"`
Name string `show:"NAME"`
Protocol string `show:"PROTOCOL"`
StorageType string `show:"STORAGETYPE"`
Sn string `show:"SN"`
Status string `show:"STATUS"`
Online string `show:"ONLINE"`
Url string `show:"Url"`
VendorName string `show:"VENDORNAME"`
StorageBackendContentName string `show:"STORAGEBACKENDCONTENTNAME"`
}
// BackendShow the content echoed by executing the oceanctl get backend
type BackendShow struct {
Namespace string `show:"NAMESPACE"`
Name string `show:"NAME"`
Protocol string `show:"PROTOCOL"`
StorageType string `show:"STORAGETYPE"`
Sn string `show:"SN"`
Status string `show:"STATUS"`
Online string `show:"ONLINE"`
Url string `show:"Url"`
}
// BackendConfigShow the content echoed by executing the oceanctl create backend
type BackendConfigShow struct {
Number string `show:"NUMBER"`
Configured string `show:"CONFIGURED"`
Name string `show:"NAME"`
Storage string `show:"STORAGE"`
Urls string `show:"URLS"`
}
// StorageBackendClaimConfig used to create a storageBackendClaim object
type StorageBackendClaimConfig struct {
Name string
Namespace string
ConfigmapMeta string
SecretMeta string
MaxClientThreads string
Provisioner string
}
// SecretConfig used to create a secret object
type SecretConfig struct {
Name string
Namespace string
User string
Pwd string
}
// ConfigMapConfig used to create a configmap object
type ConfigMapConfig struct {
Name string
Namespace string
JsonData string
}
// ShowWithContentOption set StorageBackendContent value for BackendShowWide
func (b *BackendShowWide) ShowWithContentOption(content xuanwuv1.StorageBackendContent) *BackendShowWide {
b.StorageBackendContentName = content.Name
if content.Status != nil {
b.Online = strconv.FormatBool(content.Status.Online)
b.VendorName = content.Status.VendorName
b.Sn = content.Status.SN
}
return b
}
// ShowWithConfigOption set BackendConfiguration value for BackendShowWide
func (b *BackendShowWide) ShowWithConfigOption(configuration BackendConfiguration) *BackendShowWide {
b.Url = strings.Join(configuration.Urls, "\n")
return b
}
// ShowWithClaimOption set StorageBackendClaim value for BackendShowWide
func (b *BackendShowWide) | (claim xuanwuv1.StorageBackendClaim) *BackendShowWide {
b.Namespace = claim.Namespace
b.Name = claim.Name
if claim.Status != nil {
b.StorageType = claim.Status.StorageType
b.Protocol = claim.Status.Protocol
b.Status = string(claim.Status.Phase)
}
return b
}
// ToBackendShow convert BackendShowWide to BackendShow
func (b *BackendShowWide) ToBackendShow() BackendShow {
return BackendShow{
Namespace: b.Namespace,
Name: b.Name,
Protocol: b.Protocol,
StorageType: b.StorageType,
Sn: b.Sn,
Status: b.Status,
Online: b.Online,
Url: b.Url,
}
}
// ToStorageBackendClaimConfig covert backend to StorageBackendClaimConfig
func (b *BackendConfiguration) ToStorageBackendClaimConfig() StorageBackendClaimConfig {
return StorageBackendClaimConfig{
Name: b.Name,
Namespace: b.NameSpace,
ConfigmapMeta: k8string.JoinQualifiedName(b.NameSpace, b.Name),
SecretMeta: k8string.JoinQualifiedName(b.NameSpace, b.Name),
MaxClientThreads: b.MaxClientThreads,
Provisioner: b.Provisioner,
}
}
// ToConfigMapConfig convert backend to helper.ConfigMapConfig
func (b *BackendConfiguration) ToConfigMapConfig() (ConfigMapConfig, error) {
config := struct {
Backends BackendConfiguration `json:"backends"`
}{*b}
output, err := json.MarshalIndent(&config, "", " ")
if err != nil {
return ConfigMapConfig{}, helper.LogErrorf(" json.MarshalIndent failed: %v", err)
}
return ConfigMapConfig{
Name: b.Name,
Namespace: b.NameSpace,
JsonData: string(output),
}, nil
}
// ToSecretConfig convert backend to helper.SecretConfig
// If start stdin failed, an error will return.
func (b *BackendConfiguration) ToSecretConfig() (SecretConfig, error) {
userName, password, err := helper.StartStdInput()
if err != nil {
return SecretConfig{}, err
}
return SecretConfig{
Name: b.Name,
Namespace: b.NameSpace,
User: userName,
Pwd: password,
}, nil
}
// ToConfigMap convert ConfigMapConfig to ConfigMap resource
func (c *ConfigMapConfig) ToConfigMap() corev1.ConfigMap {
return corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
APIVersion: ApiVersion,
Kind: KindConfigMap,
},
ObjectMeta: metav1.ObjectMeta{
Name: c.Name,
Namespace: c.Namespace,
},
Data: map[string]string{
"csi.json": c.JsonData,
},
}
}
// ToSecret convert SecretConfig to Secret resource
func (c *SecretConfig) ToSecret() corev1.Secret {
return corev1.Secret{
TypeMeta: metav1.TypeMeta{
APIVersion: ApiVersion,
Kind: KindSecret,
},
ObjectMeta: metav1.ObjectMeta{
Name: c.Name,
Namespace: c.Namespace,
},
StringData: map[string]string{
"password": c.Pwd,
"user": c.User,
},
Type: "Opaque",
}
}
// ToStorageBackendClaim convert StorageBackendClaimConfig to Secret StorageBackendClaim
func (c *StorageBackendClaimConfig) ToStorageBackendClaim() xuanwuv1.StorageBackendClaim {
return xuanwuv1.StorageBackendClaim{
TypeMeta: metav1.TypeMeta{
APIVersion: XuanWuApiVersion,
Kind: KindStorageBackendClaim,
},
ObjectMeta: metav1.ObjectMeta{
Name: c.Name,
Namespace: c.Namespace,
},
Spec: xuanwuv1.StorageBackendClaimSpec{
Provider: c.Provisioner,
ConfigMapMeta: c.ConfigmapMeta,
SecretMeta: c.SecretMeta,
MaxClientThreads: c.MaxClientThreads,
},
}
}
// LoadBackendsFromJson load backend from json bytes
func LoadBackendsFromJson(jsonData []byte) (map[string]*BackendConfiguration, error) {
result := make(map[string]*BackendConfiguration)
configmap := corev1.ConfigMap{}
err := json.Unmarshal(jsonData, &configmap)
if err != nil {
return result, err
}
return LoadBackendsFromConfigMap(configmap)
}
// LoadBackendsFromConfigMap load backend from configmap resource
func LoadBackendsFromConfigMap(configmap corev1.ConfigMap) (map[string]*BackendConfiguration, error) {
result := make(map[string]*BackendConfiguration)
jsonStr, ok := configmap.Data["csi.json"]
if !ok {
return result, errors.New("not found csi.json config")
}
backendContent, err := AnalyseBackendExist(jsonStr)
if err != nil {
return nil, err
}
var backends []*BackendConfiguration
if _, ok = backendContent.([]interface{}); ok {
backends, err = LoadMultipleBackendFromConfigmap(jsonStr)
} else {
backends, err = LoadSingleBackendFromConfigmap(jsonStr)
}
if err != nil {
return nil, err
}
for _, backend := range backends {
result[backend.Name] = backend
}
return result, nil
}
//AnalyseBackendExist analyse backend,an error is returned if backends not exist
func AnalyseBackendExist(jsonStr string) (interface{}, error) {
var config map[string]interface{}
if err := json.Unmarshal([]byte(jsonStr), &config); err != nil {
return nil, err
}
backendContent, ok := config["backends"]
if !ok {
return nil, errors.New("not found backends config")
}
return backendContent, nil
}
// LoadSingleBackendFromConfigmap load single backend
func LoadSingleBackendFromConfigmap(jsonStr string) ([]*BackendConfiguration, error) {
config := struct {
Backends *BackendConfiguration `json:"backends"`
}{}
if err := json.Unmarshal([]byte(jsonStr), &config); err != nil {
return nil, err
}
return []*BackendConfiguration{config.Backends}, nil
}
// LoadMultipleBackendFromConfigmap load multiple backend
func LoadMultipleBackendFromConfigmap(jsonStr string) ([]*BackendConfiguration, error) {
config := struct {
Backends []*BackendConfiguration `json:"backends"`
}{}
if err := json.Unmarshal([]byte(jsonStr), &config); err != nil {
return nil, err
}
return config.Backends, nil
}
// LoadBackendsFromYaml load backend from yaml
func LoadBackendsFromYaml(yamlData []byte) (map[string]*BackendConfiguration, error) {
cleanYamlData := strings.Trim(strings.TrimSpace(string(yamlData)), YamlSeparator)
decoder := yaml.NewDecoder(bytes.NewReader([]byte(cleanYamlData)))
var backends = map[string]*BackendConfiguration{}
config := &BackendConfiguration{}
err := decoder.Decode(config)
for err == nil {
if !reflect.DeepEqual(*config, BackendConfiguration{}) {
backends[config.Name] = config
}
config = &BackendConfiguration{}
err = decoder.Decode(config)
}
if !errors.Is(err, io.EOF) {
return backends, err
}
return backends, nil
}
| ShowWithClaimOption | identifier_name |
backend_helper.go | /*
* Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package resources
import (
"bytes"
"encoding/json"
"errors"
"io"
"reflect"
"strconv"
"strings"
"gopkg.in/yaml.v2"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8string "k8s.io/utils/strings"
"huawei-csi-driver/cli/helper"
xuanwuv1 "huawei-csi-driver/client/apis/xuanwu/v1"
)
const (
ApiVersion = "v1"
XuanWuApiVersion = "xuanwu.huawei.io/v1"
KindSecret = "Secret"
KindConfigMap = "ConfigMap"
KindStorageBackendClaim = "StorageBackendClaim"
YamlSeparator = "---"
)
// BackendConfiguration backend config
type BackendConfiguration struct {
Name string `json:"name,omitempty" yaml:"name"`
NameSpace string `json:"namespace,omitempty" yaml:"namespace"`
Storage string `json:"storage,omitempty" yaml:"storage"`
VstoreName string `json:"vstoreName,omitempty" yaml:"vstoreName"`
AccountName string `json:"accountName,omitempty" yaml:"accountName"`
Urls []string `json:"urls,omitempty" yaml:"urls"`
Pools []string `json:"pools,omitempty" yaml:"pools"`
MetrovStorePairID string `json:"metrovStorePairID,omitempty" yaml:"metrovStorePairID"`
MetroBackend string `json:"metroBackend,omitempty" yaml:"metroBackend"`
SupportedTopologies []map[string]interface{} `json:"supportedTopologies,omitempty" yaml:"supportedTopologies"`
MaxClientThreads string `json:"maxClientThreads,omitempty" yaml:"maxClientThreads"`
Configured bool `json:"-" yaml:"configured"`
Provisioner string `json:"provisioner,omitempty" yaml:"provisioner"`
Parameters struct {
Protocol string `json:"protocol,omitempty" yaml:"protocol"`
ParentName string `json:"parentname" yaml:"parentname"`
Portals []string `json:"portals,omitempty" yaml:"portals"`
Alua []map[string][]map[string]interface{} `json:"ALUA,omitempty" yaml:"ALUA"`
} `json:"parameters,omitempty" yaml:"parameters"`
}
// BackendShowWide the content echoed by executing the oceanctl get backend -o wide
type BackendShowWide struct {
Namespace string `show:"NAMESPACE"`
Name string `show:"NAME"`
Protocol string `show:"PROTOCOL"`
StorageType string `show:"STORAGETYPE"`
Sn string `show:"SN"`
Status string `show:"STATUS"`
Online string `show:"ONLINE"`
Url string `show:"Url"`
VendorName string `show:"VENDORNAME"`
StorageBackendContentName string `show:"STORAGEBACKENDCONTENTNAME"`
}
// BackendShow the content echoed by executing the oceanctl get backend
type BackendShow struct {
Namespace string `show:"NAMESPACE"`
Name string `show:"NAME"`
Protocol string `show:"PROTOCOL"`
StorageType string `show:"STORAGETYPE"`
Sn string `show:"SN"`
Status string `show:"STATUS"`
Online string `show:"ONLINE"`
Url string `show:"Url"`
}
// BackendConfigShow the content echoed by executing the oceanctl create backend
type BackendConfigShow struct {
Number string `show:"NUMBER"`
Configured string `show:"CONFIGURED"`
Name string `show:"NAME"`
Storage string `show:"STORAGE"`
Urls string `show:"URLS"`
}
// StorageBackendClaimConfig used to create a storageBackendClaim object
type StorageBackendClaimConfig struct {
Name string
Namespace string
ConfigmapMeta string
SecretMeta string
MaxClientThreads string
Provisioner string
}
// SecretConfig used to create a secret object
type SecretConfig struct {
Name string
Namespace string
User string
Pwd string
}
// ConfigMapConfig used to create a configmap object
type ConfigMapConfig struct {
Name string
Namespace string
JsonData string
}
// ShowWithContentOption set StorageBackendContent value for BackendShowWide
func (b *BackendShowWide) ShowWithContentOption(content xuanwuv1.StorageBackendContent) *BackendShowWide {
b.StorageBackendContentName = content.Name
if content.Status != nil {
b.Online = strconv.FormatBool(content.Status.Online)
b.VendorName = content.Status.VendorName
b.Sn = content.Status.SN
}
return b
}
// ShowWithConfigOption set BackendConfiguration value for BackendShowWide
func (b *BackendShowWide) ShowWithConfigOption(configuration BackendConfiguration) *BackendShowWide {
b.Url = strings.Join(configuration.Urls, "\n")
return b
}
// ShowWithClaimOption set StorageBackendClaim value for BackendShowWide
func (b *BackendShowWide) ShowWithClaimOption(claim xuanwuv1.StorageBackendClaim) *BackendShowWide {
b.Namespace = claim.Namespace
b.Name = claim.Name
if claim.Status != nil {
b.StorageType = claim.Status.StorageType
b.Protocol = claim.Status.Protocol
b.Status = string(claim.Status.Phase)
}
return b
}
// ToBackendShow convert BackendShowWide to BackendShow
func (b *BackendShowWide) ToBackendShow() BackendShow {
return BackendShow{
Namespace: b.Namespace,
Name: b.Name,
Protocol: b.Protocol,
StorageType: b.StorageType,
Sn: b.Sn,
Status: b.Status,
Online: b.Online,
Url: b.Url,
}
}
// ToStorageBackendClaimConfig covert backend to StorageBackendClaimConfig
func (b *BackendConfiguration) ToStorageBackendClaimConfig() StorageBackendClaimConfig {
return StorageBackendClaimConfig{
Name: b.Name,
Namespace: b.NameSpace,
ConfigmapMeta: k8string.JoinQualifiedName(b.NameSpace, b.Name),
SecretMeta: k8string.JoinQualifiedName(b.NameSpace, b.Name),
MaxClientThreads: b.MaxClientThreads,
Provisioner: b.Provisioner,
}
}
// ToConfigMapConfig convert backend to helper.ConfigMapConfig
func (b *BackendConfiguration) ToConfigMapConfig() (ConfigMapConfig, error) {
config := struct {
Backends BackendConfiguration `json:"backends"`
}{*b}
output, err := json.MarshalIndent(&config, "", " ")
if err != nil {
return ConfigMapConfig{}, helper.LogErrorf(" json.MarshalIndent failed: %v", err)
}
return ConfigMapConfig{
Name: b.Name,
Namespace: b.NameSpace,
JsonData: string(output),
}, nil
}
// ToSecretConfig convert backend to helper.SecretConfig
// If start stdin failed, an error will return.
func (b *BackendConfiguration) ToSecretConfig() (SecretConfig, error) {
userName, password, err := helper.StartStdInput()
if err != nil {
return SecretConfig{}, err
}
return SecretConfig{
Name: b.Name,
Namespace: b.NameSpace,
User: userName,
Pwd: password,
}, nil
}
// ToConfigMap convert ConfigMapConfig to ConfigMap resource
func (c *ConfigMapConfig) ToConfigMap() corev1.ConfigMap {
return corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
APIVersion: ApiVersion,
Kind: KindConfigMap,
},
ObjectMeta: metav1.ObjectMeta{
Name: c.Name,
Namespace: c.Namespace,
},
Data: map[string]string{
"csi.json": c.JsonData,
},
}
}
// ToSecret convert SecretConfig to Secret resource
func (c *SecretConfig) ToSecret() corev1.Secret {
return corev1.Secret{
TypeMeta: metav1.TypeMeta{
APIVersion: ApiVersion,
Kind: KindSecret,
},
ObjectMeta: metav1.ObjectMeta{
Name: c.Name,
Namespace: c.Namespace,
},
StringData: map[string]string{
"password": c.Pwd,
"user": c.User,
},
Type: "Opaque",
}
}
// ToStorageBackendClaim convert StorageBackendClaimConfig to Secret StorageBackendClaim
func (c *StorageBackendClaimConfig) ToStorageBackendClaim() xuanwuv1.StorageBackendClaim {
return xuanwuv1.StorageBackendClaim{
TypeMeta: metav1.TypeMeta{
APIVersion: XuanWuApiVersion,
Kind: KindStorageBackendClaim,
},
ObjectMeta: metav1.ObjectMeta{
Name: c.Name,
Namespace: c.Namespace,
},
Spec: xuanwuv1.StorageBackendClaimSpec{
Provider: c.Provisioner,
ConfigMapMeta: c.ConfigmapMeta,
SecretMeta: c.SecretMeta,
MaxClientThreads: c.MaxClientThreads,
},
}
}
// LoadBackendsFromJson load backend from json bytes
func LoadBackendsFromJson(jsonData []byte) (map[string]*BackendConfiguration, error) {
result := make(map[string]*BackendConfiguration)
configmap := corev1.ConfigMap{}
err := json.Unmarshal(jsonData, &configmap)
if err != nil {
return result, err
}
return LoadBackendsFromConfigMap(configmap)
}
// LoadBackendsFromConfigMap load backend from configmap resource
func LoadBackendsFromConfigMap(configmap corev1.ConfigMap) (map[string]*BackendConfiguration, error) {
result := make(map[string]*BackendConfiguration)
jsonStr, ok := configmap.Data["csi.json"]
if !ok {
return result, errors.New("not found csi.json config")
}
backendContent, err := AnalyseBackendExist(jsonStr)
if err != nil {
return nil, err
}
var backends []*BackendConfiguration
if _, ok = backendContent.([]interface{}); ok | else {
backends, err = LoadSingleBackendFromConfigmap(jsonStr)
}
if err != nil {
return nil, err
}
for _, backend := range backends {
result[backend.Name] = backend
}
return result, nil
}
//AnalyseBackendExist analyse backend,an error is returned if backends not exist
func AnalyseBackendExist(jsonStr string) (interface{}, error) {
var config map[string]interface{}
if err := json.Unmarshal([]byte(jsonStr), &config); err != nil {
return nil, err
}
backendContent, ok := config["backends"]
if !ok {
return nil, errors.New("not found backends config")
}
return backendContent, nil
}
// LoadSingleBackendFromConfigmap load single backend
func LoadSingleBackendFromConfigmap(jsonStr string) ([]*BackendConfiguration, error) {
config := struct {
Backends *BackendConfiguration `json:"backends"`
}{}
if err := json.Unmarshal([]byte(jsonStr), &config); err != nil {
return nil, err
}
return []*BackendConfiguration{config.Backends}, nil
}
// LoadMultipleBackendFromConfigmap load multiple backend
func LoadMultipleBackendFromConfigmap(jsonStr string) ([]*BackendConfiguration, error) {
config := struct {
Backends []*BackendConfiguration `json:"backends"`
}{}
if err := json.Unmarshal([]byte(jsonStr), &config); err != nil {
return nil, err
}
return config.Backends, nil
}
// LoadBackendsFromYaml load backend from yaml
func LoadBackendsFromYaml(yamlData []byte) (map[string]*BackendConfiguration, error) {
cleanYamlData := strings.Trim(strings.TrimSpace(string(yamlData)), YamlSeparator)
decoder := yaml.NewDecoder(bytes.NewReader([]byte(cleanYamlData)))
var backends = map[string]*BackendConfiguration{}
config := &BackendConfiguration{}
err := decoder.Decode(config)
for err == nil {
if !reflect.DeepEqual(*config, BackendConfiguration{}) {
backends[config.Name] = config
}
config = &BackendConfiguration{}
err = decoder.Decode(config)
}
if !errors.Is(err, io.EOF) {
return backends, err
}
return backends, nil
}
| {
backends, err = LoadMultipleBackendFromConfigmap(jsonStr)
} | conditional_block |
backend_helper.go | /*
* Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package resources
import (
"bytes"
"encoding/json"
"errors"
"io"
"reflect"
"strconv"
"strings"
"gopkg.in/yaml.v2"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8string "k8s.io/utils/strings"
"huawei-csi-driver/cli/helper"
xuanwuv1 "huawei-csi-driver/client/apis/xuanwu/v1"
)
const (
ApiVersion = "v1"
XuanWuApiVersion = "xuanwu.huawei.io/v1"
KindSecret = "Secret"
KindConfigMap = "ConfigMap"
KindStorageBackendClaim = "StorageBackendClaim"
YamlSeparator = "---"
)
// BackendConfiguration backend config
type BackendConfiguration struct {
Name string `json:"name,omitempty" yaml:"name"`
NameSpace string `json:"namespace,omitempty" yaml:"namespace"`
Storage string `json:"storage,omitempty" yaml:"storage"`
VstoreName string `json:"vstoreName,omitempty" yaml:"vstoreName"`
AccountName string `json:"accountName,omitempty" yaml:"accountName"`
Urls []string `json:"urls,omitempty" yaml:"urls"`
Pools []string `json:"pools,omitempty" yaml:"pools"`
MetrovStorePairID string `json:"metrovStorePairID,omitempty" yaml:"metrovStorePairID"`
MetroBackend string `json:"metroBackend,omitempty" yaml:"metroBackend"`
SupportedTopologies []map[string]interface{} `json:"supportedTopologies,omitempty" yaml:"supportedTopologies"`
MaxClientThreads string `json:"maxClientThreads,omitempty" yaml:"maxClientThreads"`
Configured bool `json:"-" yaml:"configured"`
Provisioner string `json:"provisioner,omitempty" yaml:"provisioner"`
Parameters struct {
Protocol string `json:"protocol,omitempty" yaml:"protocol"`
ParentName string `json:"parentname" yaml:"parentname"`
Portals []string `json:"portals,omitempty" yaml:"portals"`
Alua []map[string][]map[string]interface{} `json:"ALUA,omitempty" yaml:"ALUA"`
} `json:"parameters,omitempty" yaml:"parameters"`
}
// BackendShowWide the content echoed by executing the oceanctl get backend -o wide
type BackendShowWide struct {
Namespace string `show:"NAMESPACE"`
Name string `show:"NAME"`
Protocol string `show:"PROTOCOL"`
StorageType string `show:"STORAGETYPE"`
Sn string `show:"SN"`
Status string `show:"STATUS"`
Online string `show:"ONLINE"`
Url string `show:"Url"`
VendorName string `show:"VENDORNAME"`
StorageBackendContentName string `show:"STORAGEBACKENDCONTENTNAME"`
}
// BackendShow the content echoed by executing the oceanctl get backend
type BackendShow struct {
Namespace string `show:"NAMESPACE"`
Name string `show:"NAME"`
Protocol string `show:"PROTOCOL"`
StorageType string `show:"STORAGETYPE"`
Sn string `show:"SN"`
Status string `show:"STATUS"`
Online string `show:"ONLINE"`
Url string `show:"Url"`
}
// BackendConfigShow the content echoed by executing the oceanctl create backend
type BackendConfigShow struct {
Number string `show:"NUMBER"`
Configured string `show:"CONFIGURED"`
Name string `show:"NAME"`
Storage string `show:"STORAGE"`
Urls string `show:"URLS"`
}
// StorageBackendClaimConfig used to create a storageBackendClaim object
type StorageBackendClaimConfig struct {
Name string
Namespace string
ConfigmapMeta string
SecretMeta string
MaxClientThreads string
Provisioner string
}
// SecretConfig used to create a secret object
type SecretConfig struct {
Name string
Namespace string
User string
Pwd string
}
// ConfigMapConfig used to create a configmap object
type ConfigMapConfig struct {
Name string
Namespace string
JsonData string
}
// ShowWithContentOption set StorageBackendContent value for BackendShowWide
func (b *BackendShowWide) ShowWithContentOption(content xuanwuv1.StorageBackendContent) *BackendShowWide {
b.StorageBackendContentName = content.Name
if content.Status != nil {
b.Online = strconv.FormatBool(content.Status.Online)
b.VendorName = content.Status.VendorName
b.Sn = content.Status.SN
}
return b
}
// ShowWithConfigOption set BackendConfiguration value for BackendShowWide
func (b *BackendShowWide) ShowWithConfigOption(configuration BackendConfiguration) *BackendShowWide {
b.Url = strings.Join(configuration.Urls, "\n")
return b
}
// ShowWithClaimOption set StorageBackendClaim value for BackendShowWide
func (b *BackendShowWide) ShowWithClaimOption(claim xuanwuv1.StorageBackendClaim) *BackendShowWide {
b.Namespace = claim.Namespace
b.Name = claim.Name
if claim.Status != nil {
b.StorageType = claim.Status.StorageType
b.Protocol = claim.Status.Protocol
b.Status = string(claim.Status.Phase)
}
return b
}
// ToBackendShow convert BackendShowWide to BackendShow
func (b *BackendShowWide) ToBackendShow() BackendShow {
return BackendShow{
Namespace: b.Namespace,
Name: b.Name,
Protocol: b.Protocol,
StorageType: b.StorageType,
Sn: b.Sn,
Status: b.Status,
Online: b.Online,
Url: b.Url,
}
}
// ToStorageBackendClaimConfig covert backend to StorageBackendClaimConfig
func (b *BackendConfiguration) ToStorageBackendClaimConfig() StorageBackendClaimConfig {
return StorageBackendClaimConfig{
Name: b.Name,
Namespace: b.NameSpace,
ConfigmapMeta: k8string.JoinQualifiedName(b.NameSpace, b.Name),
SecretMeta: k8string.JoinQualifiedName(b.NameSpace, b.Name),
MaxClientThreads: b.MaxClientThreads,
Provisioner: b.Provisioner,
}
}
// ToConfigMapConfig convert backend to helper.ConfigMapConfig
func (b *BackendConfiguration) ToConfigMapConfig() (ConfigMapConfig, error) {
config := struct {
Backends BackendConfiguration `json:"backends"`
}{*b}
output, err := json.MarshalIndent(&config, "", " ")
if err != nil {
return ConfigMapConfig{}, helper.LogErrorf(" json.MarshalIndent failed: %v", err)
}
return ConfigMapConfig{
Name: b.Name,
Namespace: b.NameSpace,
JsonData: string(output),
}, nil
}
// ToSecretConfig convert backend to helper.SecretConfig
// If start stdin failed, an error will return.
func (b *BackendConfiguration) ToSecretConfig() (SecretConfig, error) {
userName, password, err := helper.StartStdInput()
if err != nil {
return SecretConfig{}, err
}
return SecretConfig{
Name: b.Name,
Namespace: b.NameSpace,
User: userName,
Pwd: password,
}, nil
}
// ToConfigMap convert ConfigMapConfig to ConfigMap resource
func (c *ConfigMapConfig) ToConfigMap() corev1.ConfigMap {
return corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
APIVersion: ApiVersion,
Kind: KindConfigMap,
},
ObjectMeta: metav1.ObjectMeta{
Name: c.Name,
Namespace: c.Namespace,
},
Data: map[string]string{
"csi.json": c.JsonData,
},
}
}
// ToSecret convert SecretConfig to Secret resource
func (c *SecretConfig) ToSecret() corev1.Secret {
return corev1.Secret{
TypeMeta: metav1.TypeMeta{
APIVersion: ApiVersion,
Kind: KindSecret,
},
ObjectMeta: metav1.ObjectMeta{
Name: c.Name,
Namespace: c.Namespace,
},
StringData: map[string]string{
"password": c.Pwd,
"user": c.User,
},
Type: "Opaque",
}
}
// ToStorageBackendClaim convert StorageBackendClaimConfig to Secret StorageBackendClaim
func (c *StorageBackendClaimConfig) ToStorageBackendClaim() xuanwuv1.StorageBackendClaim {
return xuanwuv1.StorageBackendClaim{
TypeMeta: metav1.TypeMeta{
APIVersion: XuanWuApiVersion,
Kind: KindStorageBackendClaim,
},
ObjectMeta: metav1.ObjectMeta{
Name: c.Name,
Namespace: c.Namespace,
},
Spec: xuanwuv1.StorageBackendClaimSpec{
Provider: c.Provisioner,
ConfigMapMeta: c.ConfigmapMeta,
SecretMeta: c.SecretMeta,
MaxClientThreads: c.MaxClientThreads,
},
}
}
// LoadBackendsFromJson load backend from json bytes
func LoadBackendsFromJson(jsonData []byte) (map[string]*BackendConfiguration, error) {
result := make(map[string]*BackendConfiguration)
configmap := corev1.ConfigMap{}
err := json.Unmarshal(jsonData, &configmap)
if err != nil {
return result, err
}
return LoadBackendsFromConfigMap(configmap)
}
// LoadBackendsFromConfigMap load backend from configmap resource
func LoadBackendsFromConfigMap(configmap corev1.ConfigMap) (map[string]*BackendConfiguration, error) {
result := make(map[string]*BackendConfiguration)
jsonStr, ok := configmap.Data["csi.json"]
if !ok {
return result, errors.New("not found csi.json config")
}
backendContent, err := AnalyseBackendExist(jsonStr)
if err != nil {
return nil, err
}
var backends []*BackendConfiguration
if _, ok = backendContent.([]interface{}); ok {
backends, err = LoadMultipleBackendFromConfigmap(jsonStr)
} else {
backends, err = LoadSingleBackendFromConfigmap(jsonStr)
}
if err != nil {
return nil, err
}
for _, backend := range backends {
result[backend.Name] = backend
}
return result, nil
}
//AnalyseBackendExist analyse backend,an error is returned if backends not exist
func AnalyseBackendExist(jsonStr string) (interface{}, error) {
var config map[string]interface{}
if err := json.Unmarshal([]byte(jsonStr), &config); err != nil {
return nil, err
}
backendContent, ok := config["backends"]
if !ok {
return nil, errors.New("not found backends config")
}
return backendContent, nil
}
// LoadSingleBackendFromConfigmap load single backend
func LoadSingleBackendFromConfigmap(jsonStr string) ([]*BackendConfiguration, error) {
config := struct {
Backends *BackendConfiguration `json:"backends"`
}{}
if err := json.Unmarshal([]byte(jsonStr), &config); err != nil {
return nil, err
}
return []*BackendConfiguration{config.Backends}, nil
}
// LoadMultipleBackendFromConfigmap load multiple backend
func LoadMultipleBackendFromConfigmap(jsonStr string) ([]*BackendConfiguration, error) |
// LoadBackendsFromYaml load backend from yaml
func LoadBackendsFromYaml(yamlData []byte) (map[string]*BackendConfiguration, error) {
cleanYamlData := strings.Trim(strings.TrimSpace(string(yamlData)), YamlSeparator)
decoder := yaml.NewDecoder(bytes.NewReader([]byte(cleanYamlData)))
var backends = map[string]*BackendConfiguration{}
config := &BackendConfiguration{}
err := decoder.Decode(config)
for err == nil {
if !reflect.DeepEqual(*config, BackendConfiguration{}) {
backends[config.Name] = config
}
config = &BackendConfiguration{}
err = decoder.Decode(config)
}
if !errors.Is(err, io.EOF) {
return backends, err
}
return backends, nil
}
| {
config := struct {
Backends []*BackendConfiguration `json:"backends"`
}{}
if err := json.Unmarshal([]byte(jsonStr), &config); err != nil {
return nil, err
}
return config.Backends, nil
} | identifier_body |
backend_helper.go | /*
* Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package resources
import (
"bytes"
"encoding/json"
"errors"
"io"
"reflect"
"strconv"
"strings"
"gopkg.in/yaml.v2"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8string "k8s.io/utils/strings"
"huawei-csi-driver/cli/helper"
xuanwuv1 "huawei-csi-driver/client/apis/xuanwu/v1"
)
const (
ApiVersion = "v1"
XuanWuApiVersion = "xuanwu.huawei.io/v1"
KindSecret = "Secret"
KindConfigMap = "ConfigMap"
KindStorageBackendClaim = "StorageBackendClaim"
YamlSeparator = "---"
)
// BackendConfiguration backend config
type BackendConfiguration struct {
Name string `json:"name,omitempty" yaml:"name"`
NameSpace string `json:"namespace,omitempty" yaml:"namespace"`
Storage string `json:"storage,omitempty" yaml:"storage"`
VstoreName string `json:"vstoreName,omitempty" yaml:"vstoreName"`
AccountName string `json:"accountName,omitempty" yaml:"accountName"`
Urls []string `json:"urls,omitempty" yaml:"urls"`
Pools []string `json:"pools,omitempty" yaml:"pools"`
MetrovStorePairID string `json:"metrovStorePairID,omitempty" yaml:"metrovStorePairID"`
MetroBackend string `json:"metroBackend,omitempty" yaml:"metroBackend"`
SupportedTopologies []map[string]interface{} `json:"supportedTopologies,omitempty" yaml:"supportedTopologies"`
MaxClientThreads string `json:"maxClientThreads,omitempty" yaml:"maxClientThreads"`
Configured bool `json:"-" yaml:"configured"`
Provisioner string `json:"provisioner,omitempty" yaml:"provisioner"`
Parameters struct {
Protocol string `json:"protocol,omitempty" yaml:"protocol"`
ParentName string `json:"parentname" yaml:"parentname"`
Portals []string `json:"portals,omitempty" yaml:"portals"`
Alua []map[string][]map[string]interface{} `json:"ALUA,omitempty" yaml:"ALUA"`
} `json:"parameters,omitempty" yaml:"parameters"`
}
// BackendShowWide the content echoed by executing the oceanctl get backend -o wide
type BackendShowWide struct {
Namespace string `show:"NAMESPACE"`
Name string `show:"NAME"`
Protocol string `show:"PROTOCOL"`
StorageType string `show:"STORAGETYPE"`
Sn string `show:"SN"`
Status string `show:"STATUS"`
Online string `show:"ONLINE"` | }
// BackendShow the content echoed by executing the oceanctl get backend
type BackendShow struct {
Namespace string `show:"NAMESPACE"`
Name string `show:"NAME"`
Protocol string `show:"PROTOCOL"`
StorageType string `show:"STORAGETYPE"`
Sn string `show:"SN"`
Status string `show:"STATUS"`
Online string `show:"ONLINE"`
Url string `show:"Url"`
}
// BackendConfigShow the content echoed by executing the oceanctl create backend
type BackendConfigShow struct {
Number string `show:"NUMBER"`
Configured string `show:"CONFIGURED"`
Name string `show:"NAME"`
Storage string `show:"STORAGE"`
Urls string `show:"URLS"`
}
// StorageBackendClaimConfig used to create a storageBackendClaim object
type StorageBackendClaimConfig struct {
Name string
Namespace string
ConfigmapMeta string
SecretMeta string
MaxClientThreads string
Provisioner string
}
// SecretConfig used to create a secret object
type SecretConfig struct {
Name string
Namespace string
User string
Pwd string
}
// ConfigMapConfig used to create a configmap object
type ConfigMapConfig struct {
Name string
Namespace string
JsonData string
}
// ShowWithContentOption set StorageBackendContent value for BackendShowWide
func (b *BackendShowWide) ShowWithContentOption(content xuanwuv1.StorageBackendContent) *BackendShowWide {
b.StorageBackendContentName = content.Name
if content.Status != nil {
b.Online = strconv.FormatBool(content.Status.Online)
b.VendorName = content.Status.VendorName
b.Sn = content.Status.SN
}
return b
}
// ShowWithConfigOption set BackendConfiguration value for BackendShowWide
func (b *BackendShowWide) ShowWithConfigOption(configuration BackendConfiguration) *BackendShowWide {
b.Url = strings.Join(configuration.Urls, "\n")
return b
}
// ShowWithClaimOption set StorageBackendClaim value for BackendShowWide
func (b *BackendShowWide) ShowWithClaimOption(claim xuanwuv1.StorageBackendClaim) *BackendShowWide {
b.Namespace = claim.Namespace
b.Name = claim.Name
if claim.Status != nil {
b.StorageType = claim.Status.StorageType
b.Protocol = claim.Status.Protocol
b.Status = string(claim.Status.Phase)
}
return b
}
// ToBackendShow convert BackendShowWide to BackendShow
func (b *BackendShowWide) ToBackendShow() BackendShow {
return BackendShow{
Namespace: b.Namespace,
Name: b.Name,
Protocol: b.Protocol,
StorageType: b.StorageType,
Sn: b.Sn,
Status: b.Status,
Online: b.Online,
Url: b.Url,
}
}
// ToStorageBackendClaimConfig covert backend to StorageBackendClaimConfig
func (b *BackendConfiguration) ToStorageBackendClaimConfig() StorageBackendClaimConfig {
return StorageBackendClaimConfig{
Name: b.Name,
Namespace: b.NameSpace,
ConfigmapMeta: k8string.JoinQualifiedName(b.NameSpace, b.Name),
SecretMeta: k8string.JoinQualifiedName(b.NameSpace, b.Name),
MaxClientThreads: b.MaxClientThreads,
Provisioner: b.Provisioner,
}
}
// ToConfigMapConfig convert backend to helper.ConfigMapConfig
func (b *BackendConfiguration) ToConfigMapConfig() (ConfigMapConfig, error) {
config := struct {
Backends BackendConfiguration `json:"backends"`
}{*b}
output, err := json.MarshalIndent(&config, "", " ")
if err != nil {
return ConfigMapConfig{}, helper.LogErrorf(" json.MarshalIndent failed: %v", err)
}
return ConfigMapConfig{
Name: b.Name,
Namespace: b.NameSpace,
JsonData: string(output),
}, nil
}
// ToSecretConfig convert backend to helper.SecretConfig
// If start stdin failed, an error will return.
func (b *BackendConfiguration) ToSecretConfig() (SecretConfig, error) {
userName, password, err := helper.StartStdInput()
if err != nil {
return SecretConfig{}, err
}
return SecretConfig{
Name: b.Name,
Namespace: b.NameSpace,
User: userName,
Pwd: password,
}, nil
}
// ToConfigMap convert ConfigMapConfig to ConfigMap resource
func (c *ConfigMapConfig) ToConfigMap() corev1.ConfigMap {
return corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
APIVersion: ApiVersion,
Kind: KindConfigMap,
},
ObjectMeta: metav1.ObjectMeta{
Name: c.Name,
Namespace: c.Namespace,
},
Data: map[string]string{
"csi.json": c.JsonData,
},
}
}
// ToSecret convert SecretConfig to Secret resource
func (c *SecretConfig) ToSecret() corev1.Secret {
return corev1.Secret{
TypeMeta: metav1.TypeMeta{
APIVersion: ApiVersion,
Kind: KindSecret,
},
ObjectMeta: metav1.ObjectMeta{
Name: c.Name,
Namespace: c.Namespace,
},
StringData: map[string]string{
"password": c.Pwd,
"user": c.User,
},
Type: "Opaque",
}
}
// ToStorageBackendClaim convert StorageBackendClaimConfig to Secret StorageBackendClaim
func (c *StorageBackendClaimConfig) ToStorageBackendClaim() xuanwuv1.StorageBackendClaim {
return xuanwuv1.StorageBackendClaim{
TypeMeta: metav1.TypeMeta{
APIVersion: XuanWuApiVersion,
Kind: KindStorageBackendClaim,
},
ObjectMeta: metav1.ObjectMeta{
Name: c.Name,
Namespace: c.Namespace,
},
Spec: xuanwuv1.StorageBackendClaimSpec{
Provider: c.Provisioner,
ConfigMapMeta: c.ConfigmapMeta,
SecretMeta: c.SecretMeta,
MaxClientThreads: c.MaxClientThreads,
},
}
}
// LoadBackendsFromJson load backend from json bytes
func LoadBackendsFromJson(jsonData []byte) (map[string]*BackendConfiguration, error) {
result := make(map[string]*BackendConfiguration)
configmap := corev1.ConfigMap{}
err := json.Unmarshal(jsonData, &configmap)
if err != nil {
return result, err
}
return LoadBackendsFromConfigMap(configmap)
}
// LoadBackendsFromConfigMap load backend from configmap resource
func LoadBackendsFromConfigMap(configmap corev1.ConfigMap) (map[string]*BackendConfiguration, error) {
result := make(map[string]*BackendConfiguration)
jsonStr, ok := configmap.Data["csi.json"]
if !ok {
return result, errors.New("not found csi.json config")
}
backendContent, err := AnalyseBackendExist(jsonStr)
if err != nil {
return nil, err
}
var backends []*BackendConfiguration
if _, ok = backendContent.([]interface{}); ok {
backends, err = LoadMultipleBackendFromConfigmap(jsonStr)
} else {
backends, err = LoadSingleBackendFromConfigmap(jsonStr)
}
if err != nil {
return nil, err
}
for _, backend := range backends {
result[backend.Name] = backend
}
return result, nil
}
//AnalyseBackendExist analyse backend,an error is returned if backends not exist
func AnalyseBackendExist(jsonStr string) (interface{}, error) {
var config map[string]interface{}
if err := json.Unmarshal([]byte(jsonStr), &config); err != nil {
return nil, err
}
backendContent, ok := config["backends"]
if !ok {
return nil, errors.New("not found backends config")
}
return backendContent, nil
}
// LoadSingleBackendFromConfigmap load single backend
func LoadSingleBackendFromConfigmap(jsonStr string) ([]*BackendConfiguration, error) {
config := struct {
Backends *BackendConfiguration `json:"backends"`
}{}
if err := json.Unmarshal([]byte(jsonStr), &config); err != nil {
return nil, err
}
return []*BackendConfiguration{config.Backends}, nil
}
// LoadMultipleBackendFromConfigmap load multiple backend
func LoadMultipleBackendFromConfigmap(jsonStr string) ([]*BackendConfiguration, error) {
config := struct {
Backends []*BackendConfiguration `json:"backends"`
}{}
if err := json.Unmarshal([]byte(jsonStr), &config); err != nil {
return nil, err
}
return config.Backends, nil
}
// LoadBackendsFromYaml load backend from yaml
func LoadBackendsFromYaml(yamlData []byte) (map[string]*BackendConfiguration, error) {
cleanYamlData := strings.Trim(strings.TrimSpace(string(yamlData)), YamlSeparator)
decoder := yaml.NewDecoder(bytes.NewReader([]byte(cleanYamlData)))
var backends = map[string]*BackendConfiguration{}
config := &BackendConfiguration{}
err := decoder.Decode(config)
for err == nil {
if !reflect.DeepEqual(*config, BackendConfiguration{}) {
backends[config.Name] = config
}
config = &BackendConfiguration{}
err = decoder.Decode(config)
}
if !errors.Is(err, io.EOF) {
return backends, err
}
return backends, nil
} | Url string `show:"Url"`
VendorName string `show:"VENDORNAME"`
StorageBackendContentName string `show:"STORAGEBACKENDCONTENTNAME"` | random_line_split |
drawing_support.py | """
Functions used to support drawing. No Pyglet/OpenGL here.
"""
import math
import pymunk
from PIL import Image
from pymunk import autogeometry
from typing import List, Tuple, cast
from arcade import Color
from arcade import RGBA
def get_points_for_thick_line(start_x: float, start_y: float,
end_x: float, end_y: float,
line_width: float):
"""
Function used internally for Arcade. OpenGL draws triangles only, so a think
line must be two triangles that make up a rectangle. This calculates those
points.
"""
vector_x = start_x - end_x
vector_y = start_y - end_y
perpendicular_x = vector_y
perpendicular_y = -vector_x
length = math.sqrt(vector_x * vector_x + vector_y * vector_y)
if length == 0:
normal_x = 1.0
normal_y = 1.0
else:
normal_x = perpendicular_x / length
normal_y = perpendicular_y / length
r1_x = start_x + normal_x * line_width / 2
r1_y = start_y + normal_y * line_width / 2
r2_x = start_x - normal_x * line_width / 2
r2_y = start_y - normal_y * line_width / 2
r3_x = end_x + normal_x * line_width / 2
r3_y = end_y + normal_y * line_width / 2
r4_x = end_x - normal_x * line_width / 2
r4_y = end_y - normal_y * line_width / 2
points = (r1_x, r1_y), (r2_x, r2_y), (r4_x, r4_y), (r3_x, r3_y)
return points
def get_four_byte_color(color: Color) -> RGBA:
"""
Given a RGB list, it will return RGBA.
Given a RGBA list, it will return the same RGBA.
:param Color color: Three or four byte tuple
:returns: return: Four byte RGBA tuple
"""
if len(color) == 4:
return cast(RGBA, color)
elif len(color) == 3:
return color[0], color[1], color[2], 255
else:
raise ValueError("This isn't a 3 or 4 byte color")
def get_four_float_color(color: Color) -> Tuple[float, float, float, float]:
"""
Given a 3 or 4 RGB/RGBA color where each color goes 0-255, this
returns a RGBA tuple where each item is a scaled float from 0 to 1.
:param Color color: Three or four byte tuple
:return: Four floats as a RGBA tuple
"""
if len(color) == 4:
return color[0] / 255, color[1] / 255, color[2] / 255, color[3] / 255 # type: ignore
elif len(color) == 3:
return color[0] / 255, color[1] / 255, color[2] / 255, 1.0
else:
raise ValueError("This isn't a 3 or 4 byte color")
def make_transparent_color(color: Color, transparency: float):
"""
Given a RGB color, along with an alpha, returns a RGBA color tuple.
:param Color color: Three or four byte RGBA color
:param float transparency: Transparency
"""
return color[0], color[1], color[2], transparency
def rotate_point(x: float, y: float, cx: float, cy: float,
angle_degrees: float) -> List[float]:
"""
Rotate a point around a center.
:param x: x value of the point you want to rotate
:param y: y value of the point you want to rotate
:param cx: x value of the center point you want to rotate around
:param cy: y value of the center point you want to rotate around
:param angle_degrees: Angle, in degrees, to rotate
:return: Return rotated (x, y) pair
:rtype: (float, float)
"""
temp_x = x - cx
temp_y = y - cy
# now apply rotation
angle_radians = math.radians(angle_degrees)
cos_angle = math.cos(angle_radians)
sin_angle = math.sin(angle_radians)
rotated_x = temp_x * cos_angle - temp_y * sin_angle
rotated_y = temp_x * sin_angle + temp_y * cos_angle
# translate back
rounding_precision = 2
x = round(rotated_x + cx, rounding_precision)
y = round(rotated_y + cy, rounding_precision)
return [x, y]
def calculate_hit_box_points_simple(image):
"""
Given an image, this returns points that make up a hit box around it. Attempts
to trim out transparent pixels.
:param Image image:
:Returns: List of points
"""
left_border = 0
good = True
while good and left_border < image.width:
for row in range(image.height):
pos = (left_border, row)
pixel = image.getpixel(pos)
if type(pixel) is int or len(pixel) != 4:
raise TypeError("Error, calculate_points called on image not in RGBA format")
else:
if pixel[3] != 0:
good = False
break
if good:
left_border += 1
right_border = image.width - 1
good = True
while good and right_border > 0:
for row in range(image.height):
pos = (right_border, row)
pixel = image.getpixel(pos)
if pixel[3] != 0:
good = False
break
if good:
right_border -= 1
top_border = 0
good = True
while good and top_border < image.height:
for column in range(image.width):
pos = (column, top_border)
pixel = image.getpixel(pos)
if pixel[3] != 0:
good = False
break
if good:
top_border += 1
bottom_border = image.height - 1
good = True
while good and bottom_border > 0:
for column in range(image.width):
pos = (column, bottom_border)
pixel = image.getpixel(pos)
if pixel[3] != 0:
good = False
break
if good:
bottom_border -= 1
# If the image is empty, return an empty set
if bottom_border == 0:
return []
def _check_corner_offset(start_x, start_y, x_direction, y_direction):
bad = False
offset = 0
while not bad:
y = start_y + (offset * y_direction)
x = start_x
for count in range(offset + 1):
my_pixel = image.getpixel((x, y))
# print(f"({x}, {y}) = {pixel} | ", end="")
if my_pixel[3] != 0:
bad = True
break
y -= y_direction
x += x_direction
# print(f" - {bad}")
if not bad:
offset += 1
# print(f"offset: {offset}")
return offset
def _r(point, height, width):
return point[0] - width / 2, (height - point[1]) - height / 2
top_left_corner_offset = _check_corner_offset(left_border, top_border, 1, 1)
top_right_corner_offset = _check_corner_offset(right_border, top_border, -1, 1)
bottom_left_corner_offset = _check_corner_offset(left_border, bottom_border, 1, -1)
bottom_right_corner_offset = _check_corner_offset(right_border, bottom_border, -1, -1)
p1 = left_border + top_left_corner_offset, top_border
p2 = (right_border + 1) - top_right_corner_offset, top_border
p3 = (right_border + 1), top_border + top_right_corner_offset
p4 = (right_border + 1), (bottom_border + 1) - bottom_right_corner_offset
p5 = (right_border + 1) - bottom_right_corner_offset, (bottom_border + 1)
p6 = left_border + bottom_left_corner_offset, (bottom_border + 1)
p7 = left_border, (bottom_border + 1) - bottom_left_corner_offset
p8 = left_border, top_border + top_left_corner_offset
result = []
h = image.height
w = image.width
result.append(_r(p7, h, w))
if bottom_left_corner_offset:
result.append(_r(p6, h, w))
result.append(_r(p5, h, w))
if bottom_right_corner_offset:
result.append(_r(p4, h, w))
result.append(_r(p3, h, w))
if top_right_corner_offset:
result.append(_r(p2, h, w))
result.append(_r(p1, h, w))
if top_left_corner_offset:
result.append(_r(p8, h, w))
# Remove duplicates
result = tuple(dict.fromkeys(result))
return result
def calculate_hit_box_points_detailed(image: Image, hit_box_detail: float = 4.5):
"""
Given an image, this returns points that make up a hit box around it. Attempts
to trim out transparent pixels.
:param Image image: Image get hit box from.
:param int hit_box_detail: How detailed to make the hit box. There's a
trade-off in number of points vs. accuracy.
:Returns: List of points
"""
def sample_func(sample_point):
""" Method used to sample image. """
if sample_point[0] < 0 \
or sample_point[1] < 0 \
or sample_point[0] >= image.width \
or sample_point[1] >= image.height:
return 0
point_tuple = sample_point[0], sample_point[1]
color = image.getpixel(point_tuple)
if color[3] > 0:
return 255
else:
return 0
# Do a quick check if it is a full tile
p1 = 0, 0
p2 = 0, image.height - 1
p3 = image.width - 1, image.height - 1
p4 = image.width - 1, 0
if sample_func(p1) and sample_func(p2) and sample_func(p3) and sample_func(p4):
# Do a quick check if it is a full tile
p1 = (-image.width / 2, -image.height / 2)
p2 = (image.width / 2, -image.height / 2)
p3 = (image.width / 2, image.height / 2)
p4 = (-image.width / 2, image.height / 2)
return p1, p2, p3, p4
# Get the bounding box
logo_bb = pymunk.BB(-1, -1, image.width, image.height)
# Set of lines that trace the image
line_set = pymunk.autogeometry.PolylineSet()
# How often to sample?
downres = 1
horizontal_samples = int(image.width / downres)
vertical_samples = int(image.height / downres)
# Run the trace
# Get back one or more sets of lines covering stuff.
line_sets = pymunk.autogeometry.march_soft(
logo_bb,
horizontal_samples, vertical_samples,
99,
sample_func)
if len(line_sets) == 0:
return []
selected_line_set = line_sets[0]
selected_range = None
if len(line_set) > 1:
# We have more than one line set. Try and find one that covers most of
# the sprite.
for line in line_set:
min_x = None
min_y = None
max_x = None
max_y = None
for point in line:
if min_x is None or point.x < min_x:
min_x = point.x
if max_x is None or point.x > max_x:
max_x = point.x
if min_y is None or point.y < min_y:
min_y = point.y
if max_y is None or point.y > max_y:
|
if min_x is None or max_x is None or min_y is None or max_y is None:
raise ValueError("No points in bounding box.")
my_range = max_x - min_x + max_y + min_y
if selected_range is None or my_range > selected_range:
selected_range = my_range
selected_line_set = line
# Reduce number of vertices
# original_points = len(selected_line_set)
selected_line_set = pymunk.autogeometry.simplify_curves(selected_line_set,
hit_box_detail)
# downsampled_points = len(selected_line_set)
# Convert to normal points, offset fo 0,0 is center, flip the y
hh = image.height / 2
hw = image.width / 2
points = []
for vec2 in selected_line_set:
point = round(vec2.x - hw), round(image.height - (vec2.y - hh) - image.height)
points.append(point)
if len(points) > 1 and points[0] == points[-1]:
points.pop()
# print(f"{sprite.texture.name} Line-sets={len(line_set)}, Original points={original_points}, Downsampled points={downsampled_points}")
return points
| max_y = point.y | conditional_block |
drawing_support.py | """
Functions used to support drawing. No Pyglet/OpenGL here.
"""
import math
import pymunk
from PIL import Image
from pymunk import autogeometry
from typing import List, Tuple, cast
from arcade import Color
from arcade import RGBA
def get_points_for_thick_line(start_x: float, start_y: float,
end_x: float, end_y: float,
line_width: float):
"""
Function used internally for Arcade. OpenGL draws triangles only, so a think
line must be two triangles that make up a rectangle. This calculates those
points.
"""
vector_x = start_x - end_x
vector_y = start_y - end_y
perpendicular_x = vector_y
perpendicular_y = -vector_x
length = math.sqrt(vector_x * vector_x + vector_y * vector_y)
if length == 0:
normal_x = 1.0
normal_y = 1.0
else:
normal_x = perpendicular_x / length
normal_y = perpendicular_y / length
r1_x = start_x + normal_x * line_width / 2
r1_y = start_y + normal_y * line_width / 2
r2_x = start_x - normal_x * line_width / 2
| points = (r1_x, r1_y), (r2_x, r2_y), (r4_x, r4_y), (r3_x, r3_y)
return points
def get_four_byte_color(color: Color) -> RGBA:
"""
Given a RGB list, it will return RGBA.
Given a RGBA list, it will return the same RGBA.
:param Color color: Three or four byte tuple
:returns: return: Four byte RGBA tuple
"""
if len(color) == 4:
return cast(RGBA, color)
elif len(color) == 3:
return color[0], color[1], color[2], 255
else:
raise ValueError("This isn't a 3 or 4 byte color")
def get_four_float_color(color: Color) -> Tuple[float, float, float, float]:
"""
Given a 3 or 4 RGB/RGBA color where each color goes 0-255, this
returns a RGBA tuple where each item is a scaled float from 0 to 1.
:param Color color: Three or four byte tuple
:return: Four floats as a RGBA tuple
"""
if len(color) == 4:
return color[0] / 255, color[1] / 255, color[2] / 255, color[3] / 255 # type: ignore
elif len(color) == 3:
return color[0] / 255, color[1] / 255, color[2] / 255, 1.0
else:
raise ValueError("This isn't a 3 or 4 byte color")
def make_transparent_color(color: Color, transparency: float):
"""
Given a RGB color, along with an alpha, returns a RGBA color tuple.
:param Color color: Three or four byte RGBA color
:param float transparency: Transparency
"""
return color[0], color[1], color[2], transparency
def rotate_point(x: float, y: float, cx: float, cy: float,
angle_degrees: float) -> List[float]:
"""
Rotate a point around a center.
:param x: x value of the point you want to rotate
:param y: y value of the point you want to rotate
:param cx: x value of the center point you want to rotate around
:param cy: y value of the center point you want to rotate around
:param angle_degrees: Angle, in degrees, to rotate
:return: Return rotated (x, y) pair
:rtype: (float, float)
"""
temp_x = x - cx
temp_y = y - cy
# now apply rotation
angle_radians = math.radians(angle_degrees)
cos_angle = math.cos(angle_radians)
sin_angle = math.sin(angle_radians)
rotated_x = temp_x * cos_angle - temp_y * sin_angle
rotated_y = temp_x * sin_angle + temp_y * cos_angle
# translate back
rounding_precision = 2
x = round(rotated_x + cx, rounding_precision)
y = round(rotated_y + cy, rounding_precision)
return [x, y]
def calculate_hit_box_points_simple(image):
"""
Given an image, this returns points that make up a hit box around it. Attempts
to trim out transparent pixels.
:param Image image:
:Returns: List of points
"""
left_border = 0
good = True
while good and left_border < image.width:
for row in range(image.height):
pos = (left_border, row)
pixel = image.getpixel(pos)
if type(pixel) is int or len(pixel) != 4:
raise TypeError("Error, calculate_points called on image not in RGBA format")
else:
if pixel[3] != 0:
good = False
break
if good:
left_border += 1
right_border = image.width - 1
good = True
while good and right_border > 0:
for row in range(image.height):
pos = (right_border, row)
pixel = image.getpixel(pos)
if pixel[3] != 0:
good = False
break
if good:
right_border -= 1
top_border = 0
good = True
while good and top_border < image.height:
for column in range(image.width):
pos = (column, top_border)
pixel = image.getpixel(pos)
if pixel[3] != 0:
good = False
break
if good:
top_border += 1
bottom_border = image.height - 1
good = True
while good and bottom_border > 0:
for column in range(image.width):
pos = (column, bottom_border)
pixel = image.getpixel(pos)
if pixel[3] != 0:
good = False
break
if good:
bottom_border -= 1
# If the image is empty, return an empty set
if bottom_border == 0:
return []
def _check_corner_offset(start_x, start_y, x_direction, y_direction):
bad = False
offset = 0
while not bad:
y = start_y + (offset * y_direction)
x = start_x
for count in range(offset + 1):
my_pixel = image.getpixel((x, y))
# print(f"({x}, {y}) = {pixel} | ", end="")
if my_pixel[3] != 0:
bad = True
break
y -= y_direction
x += x_direction
# print(f" - {bad}")
if not bad:
offset += 1
# print(f"offset: {offset}")
return offset
def _r(point, height, width):
return point[0] - width / 2, (height - point[1]) - height / 2
top_left_corner_offset = _check_corner_offset(left_border, top_border, 1, 1)
top_right_corner_offset = _check_corner_offset(right_border, top_border, -1, 1)
bottom_left_corner_offset = _check_corner_offset(left_border, bottom_border, 1, -1)
bottom_right_corner_offset = _check_corner_offset(right_border, bottom_border, -1, -1)
p1 = left_border + top_left_corner_offset, top_border
p2 = (right_border + 1) - top_right_corner_offset, top_border
p3 = (right_border + 1), top_border + top_right_corner_offset
p4 = (right_border + 1), (bottom_border + 1) - bottom_right_corner_offset
p5 = (right_border + 1) - bottom_right_corner_offset, (bottom_border + 1)
p6 = left_border + bottom_left_corner_offset, (bottom_border + 1)
p7 = left_border, (bottom_border + 1) - bottom_left_corner_offset
p8 = left_border, top_border + top_left_corner_offset
result = []
h = image.height
w = image.width
result.append(_r(p7, h, w))
if bottom_left_corner_offset:
result.append(_r(p6, h, w))
result.append(_r(p5, h, w))
if bottom_right_corner_offset:
result.append(_r(p4, h, w))
result.append(_r(p3, h, w))
if top_right_corner_offset:
result.append(_r(p2, h, w))
result.append(_r(p1, h, w))
if top_left_corner_offset:
result.append(_r(p8, h, w))
# Remove duplicates
result = tuple(dict.fromkeys(result))
return result
def calculate_hit_box_points_detailed(image: Image, hit_box_detail: float = 4.5):
"""
Given an image, this returns points that make up a hit box around it. Attempts
to trim out transparent pixels.
:param Image image: Image get hit box from.
:param int hit_box_detail: How detailed to make the hit box. There's a
trade-off in number of points vs. accuracy.
:Returns: List of points
"""
def sample_func(sample_point):
""" Method used to sample image. """
if sample_point[0] < 0 \
or sample_point[1] < 0 \
or sample_point[0] >= image.width \
or sample_point[1] >= image.height:
return 0
point_tuple = sample_point[0], sample_point[1]
color = image.getpixel(point_tuple)
if color[3] > 0:
return 255
else:
return 0
# Do a quick check if it is a full tile
p1 = 0, 0
p2 = 0, image.height - 1
p3 = image.width - 1, image.height - 1
p4 = image.width - 1, 0
if sample_func(p1) and sample_func(p2) and sample_func(p3) and sample_func(p4):
# Do a quick check if it is a full tile
p1 = (-image.width / 2, -image.height / 2)
p2 = (image.width / 2, -image.height / 2)
p3 = (image.width / 2, image.height / 2)
p4 = (-image.width / 2, image.height / 2)
return p1, p2, p3, p4
# Get the bounding box
logo_bb = pymunk.BB(-1, -1, image.width, image.height)
# Set of lines that trace the image
line_set = pymunk.autogeometry.PolylineSet()
# How often to sample?
downres = 1
horizontal_samples = int(image.width / downres)
vertical_samples = int(image.height / downres)
# Run the trace
# Get back one or more sets of lines covering stuff.
line_sets = pymunk.autogeometry.march_soft(
logo_bb,
horizontal_samples, vertical_samples,
99,
sample_func)
if len(line_sets) == 0:
return []
selected_line_set = line_sets[0]
selected_range = None
if len(line_set) > 1:
# We have more than one line set. Try and find one that covers most of
# the sprite.
for line in line_set:
min_x = None
min_y = None
max_x = None
max_y = None
for point in line:
if min_x is None or point.x < min_x:
min_x = point.x
if max_x is None or point.x > max_x:
max_x = point.x
if min_y is None or point.y < min_y:
min_y = point.y
if max_y is None or point.y > max_y:
max_y = point.y
if min_x is None or max_x is None or min_y is None or max_y is None:
raise ValueError("No points in bounding box.")
my_range = max_x - min_x + max_y + min_y
if selected_range is None or my_range > selected_range:
selected_range = my_range
selected_line_set = line
# Reduce number of vertices
# original_points = len(selected_line_set)
selected_line_set = pymunk.autogeometry.simplify_curves(selected_line_set,
hit_box_detail)
# downsampled_points = len(selected_line_set)
# Convert to normal points, offset fo 0,0 is center, flip the y
hh = image.height / 2
hw = image.width / 2
points = []
for vec2 in selected_line_set:
point = round(vec2.x - hw), round(image.height - (vec2.y - hh) - image.height)
points.append(point)
if len(points) > 1 and points[0] == points[-1]:
points.pop()
# print(f"{sprite.texture.name} Line-sets={len(line_set)}, Original points={original_points}, Downsampled points={downsampled_points}")
return points | r2_y = start_y - normal_y * line_width / 2
r3_x = end_x + normal_x * line_width / 2
r3_y = end_y + normal_y * line_width / 2
r4_x = end_x - normal_x * line_width / 2
r4_y = end_y - normal_y * line_width / 2
| random_line_split |
drawing_support.py | """
Functions used to support drawing. No Pyglet/OpenGL here.
"""
import math
import pymunk
from PIL import Image
from pymunk import autogeometry
from typing import List, Tuple, cast
from arcade import Color
from arcade import RGBA
def get_points_for_thick_line(start_x: float, start_y: float,
end_x: float, end_y: float,
line_width: float):
"""
Function used internally for Arcade. OpenGL draws triangles only, so a think
line must be two triangles that make up a rectangle. This calculates those
points.
"""
vector_x = start_x - end_x
vector_y = start_y - end_y
perpendicular_x = vector_y
perpendicular_y = -vector_x
length = math.sqrt(vector_x * vector_x + vector_y * vector_y)
if length == 0:
normal_x = 1.0
normal_y = 1.0
else:
normal_x = perpendicular_x / length
normal_y = perpendicular_y / length
r1_x = start_x + normal_x * line_width / 2
r1_y = start_y + normal_y * line_width / 2
r2_x = start_x - normal_x * line_width / 2
r2_y = start_y - normal_y * line_width / 2
r3_x = end_x + normal_x * line_width / 2
r3_y = end_y + normal_y * line_width / 2
r4_x = end_x - normal_x * line_width / 2
r4_y = end_y - normal_y * line_width / 2
points = (r1_x, r1_y), (r2_x, r2_y), (r4_x, r4_y), (r3_x, r3_y)
return points
def get_four_byte_color(color: Color) -> RGBA:
"""
Given a RGB list, it will return RGBA.
Given a RGBA list, it will return the same RGBA.
:param Color color: Three or four byte tuple
:returns: return: Four byte RGBA tuple
"""
if len(color) == 4:
return cast(RGBA, color)
elif len(color) == 3:
return color[0], color[1], color[2], 255
else:
raise ValueError("This isn't a 3 or 4 byte color")
def get_four_float_color(color: Color) -> Tuple[float, float, float, float]:
"""
Given a 3 or 4 RGB/RGBA color where each color goes 0-255, this
returns a RGBA tuple where each item is a scaled float from 0 to 1.
:param Color color: Three or four byte tuple
:return: Four floats as a RGBA tuple
"""
if len(color) == 4:
return color[0] / 255, color[1] / 255, color[2] / 255, color[3] / 255 # type: ignore
elif len(color) == 3:
return color[0] / 255, color[1] / 255, color[2] / 255, 1.0
else:
raise ValueError("This isn't a 3 or 4 byte color")
def make_transparent_color(color: Color, transparency: float):
|
def rotate_point(x: float, y: float, cx: float, cy: float,
angle_degrees: float) -> List[float]:
"""
Rotate a point around a center.
:param x: x value of the point you want to rotate
:param y: y value of the point you want to rotate
:param cx: x value of the center point you want to rotate around
:param cy: y value of the center point you want to rotate around
:param angle_degrees: Angle, in degrees, to rotate
:return: Return rotated (x, y) pair
:rtype: (float, float)
"""
temp_x = x - cx
temp_y = y - cy
# now apply rotation
angle_radians = math.radians(angle_degrees)
cos_angle = math.cos(angle_radians)
sin_angle = math.sin(angle_radians)
rotated_x = temp_x * cos_angle - temp_y * sin_angle
rotated_y = temp_x * sin_angle + temp_y * cos_angle
# translate back
rounding_precision = 2
x = round(rotated_x + cx, rounding_precision)
y = round(rotated_y + cy, rounding_precision)
return [x, y]
def calculate_hit_box_points_simple(image):
"""
Given an image, this returns points that make up a hit box around it. Attempts
to trim out transparent pixels.
:param Image image:
:Returns: List of points
"""
left_border = 0
good = True
while good and left_border < image.width:
for row in range(image.height):
pos = (left_border, row)
pixel = image.getpixel(pos)
if type(pixel) is int or len(pixel) != 4:
raise TypeError("Error, calculate_points called on image not in RGBA format")
else:
if pixel[3] != 0:
good = False
break
if good:
left_border += 1
right_border = image.width - 1
good = True
while good and right_border > 0:
for row in range(image.height):
pos = (right_border, row)
pixel = image.getpixel(pos)
if pixel[3] != 0:
good = False
break
if good:
right_border -= 1
top_border = 0
good = True
while good and top_border < image.height:
for column in range(image.width):
pos = (column, top_border)
pixel = image.getpixel(pos)
if pixel[3] != 0:
good = False
break
if good:
top_border += 1
bottom_border = image.height - 1
good = True
while good and bottom_border > 0:
for column in range(image.width):
pos = (column, bottom_border)
pixel = image.getpixel(pos)
if pixel[3] != 0:
good = False
break
if good:
bottom_border -= 1
# If the image is empty, return an empty set
if bottom_border == 0:
return []
def _check_corner_offset(start_x, start_y, x_direction, y_direction):
bad = False
offset = 0
while not bad:
y = start_y + (offset * y_direction)
x = start_x
for count in range(offset + 1):
my_pixel = image.getpixel((x, y))
# print(f"({x}, {y}) = {pixel} | ", end="")
if my_pixel[3] != 0:
bad = True
break
y -= y_direction
x += x_direction
# print(f" - {bad}")
if not bad:
offset += 1
# print(f"offset: {offset}")
return offset
def _r(point, height, width):
return point[0] - width / 2, (height - point[1]) - height / 2
top_left_corner_offset = _check_corner_offset(left_border, top_border, 1, 1)
top_right_corner_offset = _check_corner_offset(right_border, top_border, -1, 1)
bottom_left_corner_offset = _check_corner_offset(left_border, bottom_border, 1, -1)
bottom_right_corner_offset = _check_corner_offset(right_border, bottom_border, -1, -1)
p1 = left_border + top_left_corner_offset, top_border
p2 = (right_border + 1) - top_right_corner_offset, top_border
p3 = (right_border + 1), top_border + top_right_corner_offset
p4 = (right_border + 1), (bottom_border + 1) - bottom_right_corner_offset
p5 = (right_border + 1) - bottom_right_corner_offset, (bottom_border + 1)
p6 = left_border + bottom_left_corner_offset, (bottom_border + 1)
p7 = left_border, (bottom_border + 1) - bottom_left_corner_offset
p8 = left_border, top_border + top_left_corner_offset
result = []
h = image.height
w = image.width
result.append(_r(p7, h, w))
if bottom_left_corner_offset:
result.append(_r(p6, h, w))
result.append(_r(p5, h, w))
if bottom_right_corner_offset:
result.append(_r(p4, h, w))
result.append(_r(p3, h, w))
if top_right_corner_offset:
result.append(_r(p2, h, w))
result.append(_r(p1, h, w))
if top_left_corner_offset:
result.append(_r(p8, h, w))
# Remove duplicates
result = tuple(dict.fromkeys(result))
return result
def calculate_hit_box_points_detailed(image: Image, hit_box_detail: float = 4.5):
"""
Given an image, this returns points that make up a hit box around it. Attempts
to trim out transparent pixels.
:param Image image: Image get hit box from.
:param int hit_box_detail: How detailed to make the hit box. There's a
trade-off in number of points vs. accuracy.
:Returns: List of points
"""
def sample_func(sample_point):
""" Method used to sample image. """
if sample_point[0] < 0 \
or sample_point[1] < 0 \
or sample_point[0] >= image.width \
or sample_point[1] >= image.height:
return 0
point_tuple = sample_point[0], sample_point[1]
color = image.getpixel(point_tuple)
if color[3] > 0:
return 255
else:
return 0
# Do a quick check if it is a full tile
p1 = 0, 0
p2 = 0, image.height - 1
p3 = image.width - 1, image.height - 1
p4 = image.width - 1, 0
if sample_func(p1) and sample_func(p2) and sample_func(p3) and sample_func(p4):
# Do a quick check if it is a full tile
p1 = (-image.width / 2, -image.height / 2)
p2 = (image.width / 2, -image.height / 2)
p3 = (image.width / 2, image.height / 2)
p4 = (-image.width / 2, image.height / 2)
return p1, p2, p3, p4
# Get the bounding box
logo_bb = pymunk.BB(-1, -1, image.width, image.height)
# Set of lines that trace the image
line_set = pymunk.autogeometry.PolylineSet()
# How often to sample?
downres = 1
horizontal_samples = int(image.width / downres)
vertical_samples = int(image.height / downres)
# Run the trace
# Get back one or more sets of lines covering stuff.
line_sets = pymunk.autogeometry.march_soft(
logo_bb,
horizontal_samples, vertical_samples,
99,
sample_func)
if len(line_sets) == 0:
return []
selected_line_set = line_sets[0]
selected_range = None
if len(line_set) > 1:
# We have more than one line set. Try and find one that covers most of
# the sprite.
for line in line_set:
min_x = None
min_y = None
max_x = None
max_y = None
for point in line:
if min_x is None or point.x < min_x:
min_x = point.x
if max_x is None or point.x > max_x:
max_x = point.x
if min_y is None or point.y < min_y:
min_y = point.y
if max_y is None or point.y > max_y:
max_y = point.y
if min_x is None or max_x is None or min_y is None or max_y is None:
raise ValueError("No points in bounding box.")
my_range = max_x - min_x + max_y + min_y
if selected_range is None or my_range > selected_range:
selected_range = my_range
selected_line_set = line
# Reduce number of vertices
# original_points = len(selected_line_set)
selected_line_set = pymunk.autogeometry.simplify_curves(selected_line_set,
hit_box_detail)
# downsampled_points = len(selected_line_set)
# Convert to normal points, offset fo 0,0 is center, flip the y
hh = image.height / 2
hw = image.width / 2
points = []
for vec2 in selected_line_set:
point = round(vec2.x - hw), round(image.height - (vec2.y - hh) - image.height)
points.append(point)
if len(points) > 1 and points[0] == points[-1]:
points.pop()
# print(f"{sprite.texture.name} Line-sets={len(line_set)}, Original points={original_points}, Downsampled points={downsampled_points}")
return points
| """
Given a RGB color, along with an alpha, returns a RGBA color tuple.
:param Color color: Three or four byte RGBA color
:param float transparency: Transparency
"""
return color[0], color[1], color[2], transparency | identifier_body |
drawing_support.py | """
Functions used to support drawing. No Pyglet/OpenGL here.
"""
import math
import pymunk
from PIL import Image
from pymunk import autogeometry
from typing import List, Tuple, cast
from arcade import Color
from arcade import RGBA
def get_points_for_thick_line(start_x: float, start_y: float,
end_x: float, end_y: float,
line_width: float):
"""
Function used internally for Arcade. OpenGL draws triangles only, so a think
line must be two triangles that make up a rectangle. This calculates those
points.
"""
vector_x = start_x - end_x
vector_y = start_y - end_y
perpendicular_x = vector_y
perpendicular_y = -vector_x
length = math.sqrt(vector_x * vector_x + vector_y * vector_y)
if length == 0:
normal_x = 1.0
normal_y = 1.0
else:
normal_x = perpendicular_x / length
normal_y = perpendicular_y / length
r1_x = start_x + normal_x * line_width / 2
r1_y = start_y + normal_y * line_width / 2
r2_x = start_x - normal_x * line_width / 2
r2_y = start_y - normal_y * line_width / 2
r3_x = end_x + normal_x * line_width / 2
r3_y = end_y + normal_y * line_width / 2
r4_x = end_x - normal_x * line_width / 2
r4_y = end_y - normal_y * line_width / 2
points = (r1_x, r1_y), (r2_x, r2_y), (r4_x, r4_y), (r3_x, r3_y)
return points
def get_four_byte_color(color: Color) -> RGBA:
"""
Given a RGB list, it will return RGBA.
Given a RGBA list, it will return the same RGBA.
:param Color color: Three or four byte tuple
:returns: return: Four byte RGBA tuple
"""
if len(color) == 4:
return cast(RGBA, color)
elif len(color) == 3:
return color[0], color[1], color[2], 255
else:
raise ValueError("This isn't a 3 or 4 byte color")
def get_four_float_color(color: Color) -> Tuple[float, float, float, float]:
"""
Given a 3 or 4 RGB/RGBA color where each color goes 0-255, this
returns a RGBA tuple where each item is a scaled float from 0 to 1.
:param Color color: Three or four byte tuple
:return: Four floats as a RGBA tuple
"""
if len(color) == 4:
return color[0] / 255, color[1] / 255, color[2] / 255, color[3] / 255 # type: ignore
elif len(color) == 3:
return color[0] / 255, color[1] / 255, color[2] / 255, 1.0
else:
raise ValueError("This isn't a 3 or 4 byte color")
def make_transparent_color(color: Color, transparency: float):
"""
Given a RGB color, along with an alpha, returns a RGBA color tuple.
:param Color color: Three or four byte RGBA color
:param float transparency: Transparency
"""
return color[0], color[1], color[2], transparency
def rotate_point(x: float, y: float, cx: float, cy: float,
angle_degrees: float) -> List[float]:
"""
Rotate a point around a center.
:param x: x value of the point you want to rotate
:param y: y value of the point you want to rotate
:param cx: x value of the center point you want to rotate around
:param cy: y value of the center point you want to rotate around
:param angle_degrees: Angle, in degrees, to rotate
:return: Return rotated (x, y) pair
:rtype: (float, float)
"""
temp_x = x - cx
temp_y = y - cy
# now apply rotation
angle_radians = math.radians(angle_degrees)
cos_angle = math.cos(angle_radians)
sin_angle = math.sin(angle_radians)
rotated_x = temp_x * cos_angle - temp_y * sin_angle
rotated_y = temp_x * sin_angle + temp_y * cos_angle
# translate back
rounding_precision = 2
x = round(rotated_x + cx, rounding_precision)
y = round(rotated_y + cy, rounding_precision)
return [x, y]
def calculate_hit_box_points_simple(image):
"""
Given an image, this returns points that make up a hit box around it. Attempts
to trim out transparent pixels.
:param Image image:
:Returns: List of points
"""
left_border = 0
good = True
while good and left_border < image.width:
for row in range(image.height):
pos = (left_border, row)
pixel = image.getpixel(pos)
if type(pixel) is int or len(pixel) != 4:
raise TypeError("Error, calculate_points called on image not in RGBA format")
else:
if pixel[3] != 0:
good = False
break
if good:
left_border += 1
right_border = image.width - 1
good = True
while good and right_border > 0:
for row in range(image.height):
pos = (right_border, row)
pixel = image.getpixel(pos)
if pixel[3] != 0:
good = False
break
if good:
right_border -= 1
top_border = 0
good = True
while good and top_border < image.height:
for column in range(image.width):
pos = (column, top_border)
pixel = image.getpixel(pos)
if pixel[3] != 0:
good = False
break
if good:
top_border += 1
bottom_border = image.height - 1
good = True
while good and bottom_border > 0:
for column in range(image.width):
pos = (column, bottom_border)
pixel = image.getpixel(pos)
if pixel[3] != 0:
good = False
break
if good:
bottom_border -= 1
# If the image is empty, return an empty set
if bottom_border == 0:
return []
def _check_corner_offset(start_x, start_y, x_direction, y_direction):
bad = False
offset = 0
while not bad:
y = start_y + (offset * y_direction)
x = start_x
for count in range(offset + 1):
my_pixel = image.getpixel((x, y))
# print(f"({x}, {y}) = {pixel} | ", end="")
if my_pixel[3] != 0:
bad = True
break
y -= y_direction
x += x_direction
# print(f" - {bad}")
if not bad:
offset += 1
# print(f"offset: {offset}")
return offset
def _r(point, height, width):
return point[0] - width / 2, (height - point[1]) - height / 2
top_left_corner_offset = _check_corner_offset(left_border, top_border, 1, 1)
top_right_corner_offset = _check_corner_offset(right_border, top_border, -1, 1)
bottom_left_corner_offset = _check_corner_offset(left_border, bottom_border, 1, -1)
bottom_right_corner_offset = _check_corner_offset(right_border, bottom_border, -1, -1)
p1 = left_border + top_left_corner_offset, top_border
p2 = (right_border + 1) - top_right_corner_offset, top_border
p3 = (right_border + 1), top_border + top_right_corner_offset
p4 = (right_border + 1), (bottom_border + 1) - bottom_right_corner_offset
p5 = (right_border + 1) - bottom_right_corner_offset, (bottom_border + 1)
p6 = left_border + bottom_left_corner_offset, (bottom_border + 1)
p7 = left_border, (bottom_border + 1) - bottom_left_corner_offset
p8 = left_border, top_border + top_left_corner_offset
result = []
h = image.height
w = image.width
result.append(_r(p7, h, w))
if bottom_left_corner_offset:
result.append(_r(p6, h, w))
result.append(_r(p5, h, w))
if bottom_right_corner_offset:
result.append(_r(p4, h, w))
result.append(_r(p3, h, w))
if top_right_corner_offset:
result.append(_r(p2, h, w))
result.append(_r(p1, h, w))
if top_left_corner_offset:
result.append(_r(p8, h, w))
# Remove duplicates
result = tuple(dict.fromkeys(result))
return result
def | (image: Image, hit_box_detail: float = 4.5):
"""
Given an image, this returns points that make up a hit box around it. Attempts
to trim out transparent pixels.
:param Image image: Image get hit box from.
:param int hit_box_detail: How detailed to make the hit box. There's a
trade-off in number of points vs. accuracy.
:Returns: List of points
"""
def sample_func(sample_point):
""" Method used to sample image. """
if sample_point[0] < 0 \
or sample_point[1] < 0 \
or sample_point[0] >= image.width \
or sample_point[1] >= image.height:
return 0
point_tuple = sample_point[0], sample_point[1]
color = image.getpixel(point_tuple)
if color[3] > 0:
return 255
else:
return 0
# Do a quick check if it is a full tile
p1 = 0, 0
p2 = 0, image.height - 1
p3 = image.width - 1, image.height - 1
p4 = image.width - 1, 0
if sample_func(p1) and sample_func(p2) and sample_func(p3) and sample_func(p4):
# Do a quick check if it is a full tile
p1 = (-image.width / 2, -image.height / 2)
p2 = (image.width / 2, -image.height / 2)
p3 = (image.width / 2, image.height / 2)
p4 = (-image.width / 2, image.height / 2)
return p1, p2, p3, p4
# Get the bounding box
logo_bb = pymunk.BB(-1, -1, image.width, image.height)
# Set of lines that trace the image
line_set = pymunk.autogeometry.PolylineSet()
# How often to sample?
downres = 1
horizontal_samples = int(image.width / downres)
vertical_samples = int(image.height / downres)
# Run the trace
# Get back one or more sets of lines covering stuff.
line_sets = pymunk.autogeometry.march_soft(
logo_bb,
horizontal_samples, vertical_samples,
99,
sample_func)
if len(line_sets) == 0:
return []
selected_line_set = line_sets[0]
selected_range = None
if len(line_set) > 1:
# We have more than one line set. Try and find one that covers most of
# the sprite.
for line in line_set:
min_x = None
min_y = None
max_x = None
max_y = None
for point in line:
if min_x is None or point.x < min_x:
min_x = point.x
if max_x is None or point.x > max_x:
max_x = point.x
if min_y is None or point.y < min_y:
min_y = point.y
if max_y is None or point.y > max_y:
max_y = point.y
if min_x is None or max_x is None or min_y is None or max_y is None:
raise ValueError("No points in bounding box.")
my_range = max_x - min_x + max_y + min_y
if selected_range is None or my_range > selected_range:
selected_range = my_range
selected_line_set = line
# Reduce number of vertices
# original_points = len(selected_line_set)
selected_line_set = pymunk.autogeometry.simplify_curves(selected_line_set,
hit_box_detail)
# downsampled_points = len(selected_line_set)
# Convert to normal points, offset fo 0,0 is center, flip the y
hh = image.height / 2
hw = image.width / 2
points = []
for vec2 in selected_line_set:
point = round(vec2.x - hw), round(image.height - (vec2.y - hh) - image.height)
points.append(point)
if len(points) > 1 and points[0] == points[-1]:
points.pop()
# print(f"{sprite.texture.name} Line-sets={len(line_set)}, Original points={original_points}, Downsampled points={downsampled_points}")
return points
| calculate_hit_box_points_detailed | identifier_name |
groupspec.pb.go | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: akash/deployment/v1beta2/groupspec.proto
package v1beta2
import (
fmt "fmt"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
v1beta2 "github.com/ovrclk/akash/types/v1beta2"
io "io"
math "math"
math_bits "math/bits"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// GroupSpec stores group specifications
type GroupSpec struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"`
Requirements v1beta2.PlacementRequirements `protobuf:"bytes,2,opt,name=requirements,proto3" json:"requirements" yaml:"requirements"`
Resources []Resource `protobuf:"bytes,3,rep,name=resources,proto3" json:"resources" yaml:"resources"`
}
func (m *GroupSpec) Reset() { *m = GroupSpec{} }
func (m *GroupSpec) String() string { return proto.CompactTextString(m) }
func (*GroupSpec) ProtoMessage() {}
func (*GroupSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_8afb9070f2e843b2, []int{0}
}
func (m *GroupSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GroupSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GroupSpec.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *GroupSpec) XXX_Merge(src proto.Message) {
xxx_messageInfo_GroupSpec.Merge(m, src)
}
func (m *GroupSpec) XXX_Size() int {
return m.Size()
}
func (m *GroupSpec) XXX_DiscardUnknown() {
xxx_messageInfo_GroupSpec.DiscardUnknown(m)
}
var xxx_messageInfo_GroupSpec proto.InternalMessageInfo
func init() |
func init() {
proto.RegisterFile("akash/deployment/v1beta2/groupspec.proto", fileDescriptor_8afb9070f2e843b2)
}
var fileDescriptor_8afb9070f2e843b2 = []byte{
// 351 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xbf, 0x4e, 0xc3, 0x30,
0x10, 0xc6, 0x93, 0x16, 0x21, 0x9a, 0x32, 0xa0, 0x80, 0x44, 0xd4, 0x21, 0xae, 0x2c, 0x21, 0x82,
0x2a, 0x39, 0x22, 0x6c, 0x1d, 0xb3, 0xb0, 0x30, 0xa0, 0xb0, 0xb1, 0x39, 0xe1, 0x94, 0x56, 0x4d,
0xea, 0x60, 0x3b, 0x15, 0xe5, 0x09, 0x18, 0x79, 0x84, 0x6e, 0xbc, 0x4a, 0xc7, 0x8e, 0x4c, 0x11,
0x6a, 0x17, 0xd4, 0xb1, 0x4f, 0x80, 0xf2, 0x8f, 0xb6, 0x43, 0x37, 0xdf, 0xf9, 0x77, 0xf7, 0xdd,
0x7d, 0xa7, 0x59, 0x74, 0x44, 0xc5, 0xc0, 0x7e, 0x81, 0x24, 0x62, 0xd3, 0x18, 0xc6, 0xd2, 0x9e,
0xdc, 0xfa, 0x20, 0xa9, 0x63, 0x87, 0x9c, 0xa5, 0x89, 0x48, 0x20, 0x20, 0x09, 0x67, 0x92, 0xe9,
0x46, 0x41, 0x92, 0x2d, 0x49, 0x2a, 0xb2, 0x73, 0x11, 0xb2, 0x90, 0x15, 0x90, 0x9d, 0xbf, 0x4a,
0xbe, 0x83, 0xcb, 0xce, 0x3e, 0x15, 0xf0, 0xdf, 0x93, 0x4a, 0xc9, 0x87, 0x7e, 0x2a, 0xa1, 0x62,
0xae, 0x0f, 0xaa, 0x73, 0x10, 0x2c, 0xe5, 0x41, 0x05, 0xe2, 0xaf, 0x86, 0xd6, 0xba, 0xcf, 0x07,
0x7a, 0x4a, 0x20, 0xd0, 0x7b, 0xda, 0xd1, 0x98, 0xc6, 0x60, 0xa8, 0x5d, 0xd5, 0x6a, 0xb9, 0x97,
0xeb, 0x0c, 0x15, 0xf1, 0x26, 0x43, 0xed, 0x29, 0x8d, 0xa3, 0x3e, 0xce, 0x23, 0xec, 0x15, 0x49,
0xfd, 0x5d, 0x3b, 0xe5, 0xf0, 0x9a, 0x0e, 0x39, 0xe4, 0x02, 0xc2, 0x68, 0x74, 0x55, 0xab, 0xed,
0xdc, 0x90, 0x72, 0x9d, 0x7c, 0xbc, 0x7a, 0x11, 0xf2, 0x18, 0xd1, 0xa0, 0xa0, 0xbc, 0x9d, 0x02,
0xb7, 0x37, 0xcf, 0x90, 0xb2, 0xce, 0xd0, 0x5e, 0x9b, 0x4d, 0x86, 0xce, 0x4b, 0xad, 0xdd, 0x2c,
0xf6, 0xf6, 0x20, 0x3d, 0xd4, 0x5a, 0xf5, 0x22, 0xc2, 0x68, 0x76, 0x9b, 0x56, 0xdb, 0xc1, 0xe4,
0x90, 0x8f, 0xc4, 0xab, 0x50, 0xf7, 0xaa, 0x52, 0xdc, 0x16, 0x6f, 0x32, 0x74, 0x56, 0xcb, 0x55,
0x29, 0xec, 0x6d, 0xbf, 0xfb, 0x27, 0x1f, 0x33, 0xa4, 0xfc, 0xce, 0x90, 0xe2, 0x3e, 0xcc, 0x97,
0xa6, 0xba, 0x58, 0x9a, 0xea, 0xcf, 0xd2, 0x54, 0x3f, 0x57, 0xa6, 0xb2, 0x58, 0x99, 0xca, 0xf7,
0xca, 0x54, 0x9e, 0x9d, 0x70, 0x28, 0x07, 0xa9, 0x4f, 0x02, 0x16, 0xdb, 0x6c, 0xc2, 0x83, 0x68,
0x64, 0x97, 0xf6, 0xbf, 0xed, 0x1e, 0x40, 0x4e, 0x13, 0x10, 0xf5, 0x19, 0xfc, 0xe3, 0xc2, 0xfe,
0xbb, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x85, 0x76, 0x81, 0x09, 0x27, 0x02, 0x00, 0x00,
}
func (m *GroupSpec) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GroupSpec) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *GroupSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Resources) > 0 {
for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGroupspec(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
}
}
{
size, err := m.Requirements.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGroupspec(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
if len(m.Name) > 0 {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintGroupspec(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func encodeVarintGroupspec(dAtA []byte, offset int, v uint64) int {
offset -= sovGroupspec(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *GroupSpec) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovGroupspec(uint64(l))
}
l = m.Requirements.Size()
n += 1 + l + sovGroupspec(uint64(l))
if len(m.Resources) > 0 {
for _, e := range m.Resources {
l = e.Size()
n += 1 + l + sovGroupspec(uint64(l))
}
}
return n
}
func sovGroupspec(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozGroupspec(x uint64) (n int) {
return sovGroupspec(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *GroupSpec) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGroupspec
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: GroupSpec: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: GroupSpec: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGroupspec
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGroupspec
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGroupspec
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Requirements", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGroupspec
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGroupspec
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGroupspec
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Requirements.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGroupspec
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGroupspec
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGroupspec
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Resources = append(m.Resources, Resource{})
if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGroupspec(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGroupspec
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipGroupspec(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGroupspec
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGroupspec
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGroupspec
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthGroupspec
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupGroupspec
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthGroupspec
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthGroupspec = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowGroupspec = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupGroupspec = fmt.Errorf("proto: unexpected end of group")
)
| {
proto.RegisterType((*GroupSpec)(nil), "akash.deployment.v1beta2.GroupSpec")
} | identifier_body |
groupspec.pb.go | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: akash/deployment/v1beta2/groupspec.proto
package v1beta2
import (
fmt "fmt"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
v1beta2 "github.com/ovrclk/akash/types/v1beta2"
io "io"
math "math"
math_bits "math/bits"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// GroupSpec stores group specifications
type GroupSpec struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"`
Requirements v1beta2.PlacementRequirements `protobuf:"bytes,2,opt,name=requirements,proto3" json:"requirements" yaml:"requirements"`
Resources []Resource `protobuf:"bytes,3,rep,name=resources,proto3" json:"resources" yaml:"resources"`
}
func (m *GroupSpec) Reset() { *m = GroupSpec{} }
func (m *GroupSpec) String() string { return proto.CompactTextString(m) }
func (*GroupSpec) ProtoMessage() {}
func (*GroupSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_8afb9070f2e843b2, []int{0}
}
func (m *GroupSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GroupSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GroupSpec.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *GroupSpec) XXX_Merge(src proto.Message) {
xxx_messageInfo_GroupSpec.Merge(m, src)
}
func (m *GroupSpec) XXX_Size() int {
return m.Size()
}
func (m *GroupSpec) XXX_DiscardUnknown() {
xxx_messageInfo_GroupSpec.DiscardUnknown(m)
}
var xxx_messageInfo_GroupSpec proto.InternalMessageInfo
func init() {
proto.RegisterType((*GroupSpec)(nil), "akash.deployment.v1beta2.GroupSpec")
}
func init() {
proto.RegisterFile("akash/deployment/v1beta2/groupspec.proto", fileDescriptor_8afb9070f2e843b2)
}
var fileDescriptor_8afb9070f2e843b2 = []byte{
// 351 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xbf, 0x4e, 0xc3, 0x30,
0x10, 0xc6, 0x93, 0x16, 0x21, 0x9a, 0x32, 0xa0, 0x80, 0x44, 0xd4, 0x21, 0xae, 0x2c, 0x21, 0x82,
0x2a, 0x39, 0x22, 0x6c, 0x1d, 0xb3, 0xb0, 0x30, 0xa0, 0xb0, 0xb1, 0x39, 0xe1, 0x94, 0x56, 0x4d,
0xea, 0x60, 0x3b, 0x15, 0xe5, 0x09, 0x18, 0x79, 0x84, 0x6e, 0xbc, 0x4a, 0xc7, 0x8e, 0x4c, 0x11,
0x6a, 0x17, 0xd4, 0xb1, 0x4f, 0x80, 0xf2, 0x8f, 0xb6, 0x43, 0x37, 0xdf, 0xf9, 0x77, 0xf7, 0xdd,
0x7d, 0xa7, 0x59, 0x74, 0x44, 0xc5, 0xc0, 0x7e, 0x81, 0x24, 0x62, 0xd3, 0x18, 0xc6, 0xd2, 0x9e,
0xdc, 0xfa, 0x20, 0xa9, 0x63, 0x87, 0x9c, 0xa5, 0x89, 0x48, 0x20, 0x20, 0x09, 0x67, 0x92, 0xe9,
0x46, 0x41, 0x92, 0x2d, 0x49, 0x2a, 0xb2, 0x73, 0x11, 0xb2, 0x90, 0x15, 0x90, 0x9d, 0xbf, 0x4a,
0xbe, 0x83, 0xcb, 0xce, 0x3e, 0x15, 0xf0, 0xdf, 0x93, 0x4a, 0xc9, 0x87, 0x7e, 0x2a, 0xa1, 0x62,
0xae, 0x0f, 0xaa, 0x73, 0x10, 0x2c, 0xe5, 0x41, 0x05, 0xe2, 0xaf, 0x86, 0xd6, 0xba, 0xcf, 0x07,
0x7a, 0x4a, 0x20, 0xd0, 0x7b, 0xda, 0xd1, 0x98, 0xc6, 0x60, 0xa8, 0x5d, 0xd5, 0x6a, 0xb9, 0x97,
0xeb, 0x0c, 0x15, 0xf1, 0x26, 0x43, 0xed, 0x29, 0x8d, 0xa3, 0x3e, 0xce, 0x23, 0xec, 0x15, 0x49,
0xfd, 0x5d, 0x3b, 0xe5, 0xf0, 0x9a, 0x0e, 0x39, 0xe4, 0x02, 0xc2, 0x68, 0x74, 0x55, 0xab, 0xed,
0xdc, 0x90, 0x72, 0x9d, 0x7c, 0xbc, 0x7a, 0x11, 0xf2, 0x18, 0xd1, 0xa0, 0xa0, 0xbc, 0x9d, 0x02,
0xb7, 0x37, 0xcf, 0x90, 0xb2, 0xce, 0xd0, 0x5e, 0x9b, 0x4d, 0x86, 0xce, 0x4b, 0xad, 0xdd, 0x2c,
0xf6, 0xf6, 0x20, 0x3d, 0xd4, 0x5a, 0xf5, 0x22, 0xc2, 0x68, 0x76, 0x9b, 0x56, 0xdb, 0xc1, 0xe4,
0x90, 0x8f, 0xc4, 0xab, 0x50, 0xf7, 0xaa, 0x52, 0xdc, 0x16, 0x6f, 0x32, 0x74, 0x56, 0xcb, 0x55,
0x29, 0xec, 0x6d, 0xbf, 0xfb, 0x27, 0x1f, 0x33, 0xa4, 0xfc, 0xce, 0x90, 0xe2, 0x3e, 0xcc, 0x97,
0xa6, 0xba, 0x58, 0x9a, 0xea, 0xcf, 0xd2, 0x54, 0x3f, 0x57, 0xa6, 0xb2, 0x58, 0x99, 0xca, 0xf7,
0xca, 0x54, 0x9e, 0x9d, 0x70, 0x28, 0x07, 0xa9, 0x4f, 0x02, 0x16, 0xdb, 0x6c, 0xc2, 0x83, 0x68,
0x64, 0x97, 0xf6, 0xbf, 0xed, 0x1e, 0x40, 0x4e, 0x13, 0x10, 0xf5, 0x19, 0xfc, 0xe3, 0xc2, 0xfe,
0xbb, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x85, 0x76, 0x81, 0x09, 0x27, 0x02, 0x00, 0x00,
}
func (m *GroupSpec) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GroupSpec) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *GroupSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Resources) > 0 {
for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGroupspec(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
}
}
{
size, err := m.Requirements.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGroupspec(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
if len(m.Name) > 0 {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintGroupspec(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func encodeVarintGroupspec(dAtA []byte, offset int, v uint64) int {
offset -= sovGroupspec(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *GroupSpec) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovGroupspec(uint64(l))
}
l = m.Requirements.Size()
n += 1 + l + sovGroupspec(uint64(l))
if len(m.Resources) > 0 {
for _, e := range m.Resources {
l = e.Size()
n += 1 + l + sovGroupspec(uint64(l))
}
}
return n
}
func sovGroupspec(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozGroupspec(x uint64) (n int) {
return sovGroupspec(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *GroupSpec) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGroupspec
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: GroupSpec: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: GroupSpec: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGroupspec
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGroupspec
}
postIndex := iNdEx + intStringLen
if postIndex < 0 { | }
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Requirements", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGroupspec
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGroupspec
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGroupspec
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Requirements.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGroupspec
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGroupspec
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGroupspec
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Resources = append(m.Resources, Resource{})
if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGroupspec(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGroupspec
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipGroupspec(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGroupspec
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGroupspec
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGroupspec
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthGroupspec
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupGroupspec
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthGroupspec
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthGroupspec = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowGroupspec = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupGroupspec = fmt.Errorf("proto: unexpected end of group")
) | return ErrInvalidLengthGroupspec
}
if postIndex > l {
return io.ErrUnexpectedEOF | random_line_split |
groupspec.pb.go | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: akash/deployment/v1beta2/groupspec.proto
package v1beta2
import (
fmt "fmt"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
v1beta2 "github.com/ovrclk/akash/types/v1beta2"
io "io"
math "math"
math_bits "math/bits"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// GroupSpec stores group specifications
type GroupSpec struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"`
Requirements v1beta2.PlacementRequirements `protobuf:"bytes,2,opt,name=requirements,proto3" json:"requirements" yaml:"requirements"`
Resources []Resource `protobuf:"bytes,3,rep,name=resources,proto3" json:"resources" yaml:"resources"`
}
func (m *GroupSpec) Reset() { *m = GroupSpec{} }
func (m *GroupSpec) String() string { return proto.CompactTextString(m) }
func (*GroupSpec) ProtoMessage() {}
func (*GroupSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_8afb9070f2e843b2, []int{0}
}
func (m *GroupSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GroupSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GroupSpec.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *GroupSpec) XXX_Merge(src proto.Message) {
xxx_messageInfo_GroupSpec.Merge(m, src)
}
func (m *GroupSpec) XXX_Size() int {
return m.Size()
}
func (m *GroupSpec) XXX_DiscardUnknown() {
xxx_messageInfo_GroupSpec.DiscardUnknown(m)
}
var xxx_messageInfo_GroupSpec proto.InternalMessageInfo
func init() {
proto.RegisterType((*GroupSpec)(nil), "akash.deployment.v1beta2.GroupSpec")
}
func init() {
proto.RegisterFile("akash/deployment/v1beta2/groupspec.proto", fileDescriptor_8afb9070f2e843b2)
}
var fileDescriptor_8afb9070f2e843b2 = []byte{
// 351 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xbf, 0x4e, 0xc3, 0x30,
0x10, 0xc6, 0x93, 0x16, 0x21, 0x9a, 0x32, 0xa0, 0x80, 0x44, 0xd4, 0x21, 0xae, 0x2c, 0x21, 0x82,
0x2a, 0x39, 0x22, 0x6c, 0x1d, 0xb3, 0xb0, 0x30, 0xa0, 0xb0, 0xb1, 0x39, 0xe1, 0x94, 0x56, 0x4d,
0xea, 0x60, 0x3b, 0x15, 0xe5, 0x09, 0x18, 0x79, 0x84, 0x6e, 0xbc, 0x4a, 0xc7, 0x8e, 0x4c, 0x11,
0x6a, 0x17, 0xd4, 0xb1, 0x4f, 0x80, 0xf2, 0x8f, 0xb6, 0x43, 0x37, 0xdf, 0xf9, 0x77, 0xf7, 0xdd,
0x7d, 0xa7, 0x59, 0x74, 0x44, 0xc5, 0xc0, 0x7e, 0x81, 0x24, 0x62, 0xd3, 0x18, 0xc6, 0xd2, 0x9e,
0xdc, 0xfa, 0x20, 0xa9, 0x63, 0x87, 0x9c, 0xa5, 0x89, 0x48, 0x20, 0x20, 0x09, 0x67, 0x92, 0xe9,
0x46, 0x41, 0x92, 0x2d, 0x49, 0x2a, 0xb2, 0x73, 0x11, 0xb2, 0x90, 0x15, 0x90, 0x9d, 0xbf, 0x4a,
0xbe, 0x83, 0xcb, 0xce, 0x3e, 0x15, 0xf0, 0xdf, 0x93, 0x4a, 0xc9, 0x87, 0x7e, 0x2a, 0xa1, 0x62,
0xae, 0x0f, 0xaa, 0x73, 0x10, 0x2c, 0xe5, 0x41, 0x05, 0xe2, 0xaf, 0x86, 0xd6, 0xba, 0xcf, 0x07,
0x7a, 0x4a, 0x20, 0xd0, 0x7b, 0xda, 0xd1, 0x98, 0xc6, 0x60, 0xa8, 0x5d, 0xd5, 0x6a, 0xb9, 0x97,
0xeb, 0x0c, 0x15, 0xf1, 0x26, 0x43, 0xed, 0x29, 0x8d, 0xa3, 0x3e, 0xce, 0x23, 0xec, 0x15, 0x49,
0xfd, 0x5d, 0x3b, 0xe5, 0xf0, 0x9a, 0x0e, 0x39, 0xe4, 0x02, 0xc2, 0x68, 0x74, 0x55, 0xab, 0xed,
0xdc, 0x90, 0x72, 0x9d, 0x7c, 0xbc, 0x7a, 0x11, 0xf2, 0x18, 0xd1, 0xa0, 0xa0, 0xbc, 0x9d, 0x02,
0xb7, 0x37, 0xcf, 0x90, 0xb2, 0xce, 0xd0, 0x5e, 0x9b, 0x4d, 0x86, 0xce, 0x4b, 0xad, 0xdd, 0x2c,
0xf6, 0xf6, 0x20, 0x3d, 0xd4, 0x5a, 0xf5, 0x22, 0xc2, 0x68, 0x76, 0x9b, 0x56, 0xdb, 0xc1, 0xe4,
0x90, 0x8f, 0xc4, 0xab, 0x50, 0xf7, 0xaa, 0x52, 0xdc, 0x16, 0x6f, 0x32, 0x74, 0x56, 0xcb, 0x55,
0x29, 0xec, 0x6d, 0xbf, 0xfb, 0x27, 0x1f, 0x33, 0xa4, 0xfc, 0xce, 0x90, 0xe2, 0x3e, 0xcc, 0x97,
0xa6, 0xba, 0x58, 0x9a, 0xea, 0xcf, 0xd2, 0x54, 0x3f, 0x57, 0xa6, 0xb2, 0x58, 0x99, 0xca, 0xf7,
0xca, 0x54, 0x9e, 0x9d, 0x70, 0x28, 0x07, 0xa9, 0x4f, 0x02, 0x16, 0xdb, 0x6c, 0xc2, 0x83, 0x68,
0x64, 0x97, 0xf6, 0xbf, 0xed, 0x1e, 0x40, 0x4e, 0x13, 0x10, 0xf5, 0x19, 0xfc, 0xe3, 0xc2, 0xfe,
0xbb, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x85, 0x76, 0x81, 0x09, 0x27, 0x02, 0x00, 0x00,
}
func (m *GroupSpec) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GroupSpec) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *GroupSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Resources) > 0 {
for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGroupspec(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
}
}
{
size, err := m.Requirements.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGroupspec(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
if len(m.Name) > 0 |
return len(dAtA) - i, nil
}
func encodeVarintGroupspec(dAtA []byte, offset int, v uint64) int {
offset -= sovGroupspec(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *GroupSpec) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovGroupspec(uint64(l))
}
l = m.Requirements.Size()
n += 1 + l + sovGroupspec(uint64(l))
if len(m.Resources) > 0 {
for _, e := range m.Resources {
l = e.Size()
n += 1 + l + sovGroupspec(uint64(l))
}
}
return n
}
func sovGroupspec(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozGroupspec(x uint64) (n int) {
return sovGroupspec(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *GroupSpec) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGroupspec
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: GroupSpec: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: GroupSpec: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGroupspec
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGroupspec
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGroupspec
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Requirements", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGroupspec
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGroupspec
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGroupspec
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Requirements.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGroupspec
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGroupspec
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGroupspec
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Resources = append(m.Resources, Resource{})
if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGroupspec(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGroupspec
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipGroupspec(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGroupspec
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGroupspec
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGroupspec
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthGroupspec
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupGroupspec
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthGroupspec
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthGroupspec = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowGroupspec = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupGroupspec = fmt.Errorf("proto: unexpected end of group")
)
| {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintGroupspec(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0xa
} | conditional_block |
groupspec.pb.go | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: akash/deployment/v1beta2/groupspec.proto
package v1beta2
import (
fmt "fmt"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
v1beta2 "github.com/ovrclk/akash/types/v1beta2"
io "io"
math "math"
math_bits "math/bits"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// GroupSpec stores group specifications
type GroupSpec struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"`
Requirements v1beta2.PlacementRequirements `protobuf:"bytes,2,opt,name=requirements,proto3" json:"requirements" yaml:"requirements"`
Resources []Resource `protobuf:"bytes,3,rep,name=resources,proto3" json:"resources" yaml:"resources"`
}
func (m *GroupSpec) Reset() { *m = GroupSpec{} }
func (m *GroupSpec) String() string { return proto.CompactTextString(m) }
func (*GroupSpec) ProtoMessage() {}
func (*GroupSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_8afb9070f2e843b2, []int{0}
}
func (m *GroupSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GroupSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GroupSpec.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *GroupSpec) XXX_Merge(src proto.Message) {
xxx_messageInfo_GroupSpec.Merge(m, src)
}
func (m *GroupSpec) XXX_Size() int {
return m.Size()
}
func (m *GroupSpec) XXX_DiscardUnknown() {
xxx_messageInfo_GroupSpec.DiscardUnknown(m)
}
var xxx_messageInfo_GroupSpec proto.InternalMessageInfo
func init() {
proto.RegisterType((*GroupSpec)(nil), "akash.deployment.v1beta2.GroupSpec")
}
func | () {
proto.RegisterFile("akash/deployment/v1beta2/groupspec.proto", fileDescriptor_8afb9070f2e843b2)
}
var fileDescriptor_8afb9070f2e843b2 = []byte{
// 351 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xbf, 0x4e, 0xc3, 0x30,
0x10, 0xc6, 0x93, 0x16, 0x21, 0x9a, 0x32, 0xa0, 0x80, 0x44, 0xd4, 0x21, 0xae, 0x2c, 0x21, 0x82,
0x2a, 0x39, 0x22, 0x6c, 0x1d, 0xb3, 0xb0, 0x30, 0xa0, 0xb0, 0xb1, 0x39, 0xe1, 0x94, 0x56, 0x4d,
0xea, 0x60, 0x3b, 0x15, 0xe5, 0x09, 0x18, 0x79, 0x84, 0x6e, 0xbc, 0x4a, 0xc7, 0x8e, 0x4c, 0x11,
0x6a, 0x17, 0xd4, 0xb1, 0x4f, 0x80, 0xf2, 0x8f, 0xb6, 0x43, 0x37, 0xdf, 0xf9, 0x77, 0xf7, 0xdd,
0x7d, 0xa7, 0x59, 0x74, 0x44, 0xc5, 0xc0, 0x7e, 0x81, 0x24, 0x62, 0xd3, 0x18, 0xc6, 0xd2, 0x9e,
0xdc, 0xfa, 0x20, 0xa9, 0x63, 0x87, 0x9c, 0xa5, 0x89, 0x48, 0x20, 0x20, 0x09, 0x67, 0x92, 0xe9,
0x46, 0x41, 0x92, 0x2d, 0x49, 0x2a, 0xb2, 0x73, 0x11, 0xb2, 0x90, 0x15, 0x90, 0x9d, 0xbf, 0x4a,
0xbe, 0x83, 0xcb, 0xce, 0x3e, 0x15, 0xf0, 0xdf, 0x93, 0x4a, 0xc9, 0x87, 0x7e, 0x2a, 0xa1, 0x62,
0xae, 0x0f, 0xaa, 0x73, 0x10, 0x2c, 0xe5, 0x41, 0x05, 0xe2, 0xaf, 0x86, 0xd6, 0xba, 0xcf, 0x07,
0x7a, 0x4a, 0x20, 0xd0, 0x7b, 0xda, 0xd1, 0x98, 0xc6, 0x60, 0xa8, 0x5d, 0xd5, 0x6a, 0xb9, 0x97,
0xeb, 0x0c, 0x15, 0xf1, 0x26, 0x43, 0xed, 0x29, 0x8d, 0xa3, 0x3e, 0xce, 0x23, 0xec, 0x15, 0x49,
0xfd, 0x5d, 0x3b, 0xe5, 0xf0, 0x9a, 0x0e, 0x39, 0xe4, 0x02, 0xc2, 0x68, 0x74, 0x55, 0xab, 0xed,
0xdc, 0x90, 0x72, 0x9d, 0x7c, 0xbc, 0x7a, 0x11, 0xf2, 0x18, 0xd1, 0xa0, 0xa0, 0xbc, 0x9d, 0x02,
0xb7, 0x37, 0xcf, 0x90, 0xb2, 0xce, 0xd0, 0x5e, 0x9b, 0x4d, 0x86, 0xce, 0x4b, 0xad, 0xdd, 0x2c,
0xf6, 0xf6, 0x20, 0x3d, 0xd4, 0x5a, 0xf5, 0x22, 0xc2, 0x68, 0x76, 0x9b, 0x56, 0xdb, 0xc1, 0xe4,
0x90, 0x8f, 0xc4, 0xab, 0x50, 0xf7, 0xaa, 0x52, 0xdc, 0x16, 0x6f, 0x32, 0x74, 0x56, 0xcb, 0x55,
0x29, 0xec, 0x6d, 0xbf, 0xfb, 0x27, 0x1f, 0x33, 0xa4, 0xfc, 0xce, 0x90, 0xe2, 0x3e, 0xcc, 0x97,
0xa6, 0xba, 0x58, 0x9a, 0xea, 0xcf, 0xd2, 0x54, 0x3f, 0x57, 0xa6, 0xb2, 0x58, 0x99, 0xca, 0xf7,
0xca, 0x54, 0x9e, 0x9d, 0x70, 0x28, 0x07, 0xa9, 0x4f, 0x02, 0x16, 0xdb, 0x6c, 0xc2, 0x83, 0x68,
0x64, 0x97, 0xf6, 0xbf, 0xed, 0x1e, 0x40, 0x4e, 0x13, 0x10, 0xf5, 0x19, 0xfc, 0xe3, 0xc2, 0xfe,
0xbb, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x85, 0x76, 0x81, 0x09, 0x27, 0x02, 0x00, 0x00,
}
func (m *GroupSpec) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GroupSpec) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *GroupSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Resources) > 0 {
for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGroupspec(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
}
}
{
size, err := m.Requirements.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGroupspec(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
if len(m.Name) > 0 {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintGroupspec(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func encodeVarintGroupspec(dAtA []byte, offset int, v uint64) int {
offset -= sovGroupspec(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *GroupSpec) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovGroupspec(uint64(l))
}
l = m.Requirements.Size()
n += 1 + l + sovGroupspec(uint64(l))
if len(m.Resources) > 0 {
for _, e := range m.Resources {
l = e.Size()
n += 1 + l + sovGroupspec(uint64(l))
}
}
return n
}
func sovGroupspec(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozGroupspec(x uint64) (n int) {
return sovGroupspec(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *GroupSpec) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGroupspec
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: GroupSpec: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: GroupSpec: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGroupspec
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGroupspec
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGroupspec
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Requirements", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGroupspec
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGroupspec
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGroupspec
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Requirements.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGroupspec
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGroupspec
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGroupspec
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Resources = append(m.Resources, Resource{})
if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGroupspec(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGroupspec
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipGroupspec(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGroupspec
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGroupspec
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGroupspec
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthGroupspec
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupGroupspec
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthGroupspec
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthGroupspec = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowGroupspec = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupGroupspec = fmt.Errorf("proto: unexpected end of group")
)
| init | identifier_name |
main.go | package main
import (
"bufio"
"fmt"
"math/rand"
"os"
"sort"
"strconv"
"strings"
"time"
)
var validAttrs = []string{
"strength", "defense", "speed", "accuracy", "vitality", "resistance", "willpower",
}
func main() {
rand.Seed(time.Now().UnixNano())
Play()
}
type Unit struct {
Name string
Type string
BaseAttributes Attributes
combatAttrMods Attributes
currentAttributes Attributes
BaseStats BaseStats
Hp int
Fatigue int
IsHuman bool
Attacks []Attack
Team int
AiLevel int
}
func (u Unit) Crunch() Unit {
u.currentAttributes = Attributes{
Strength: u.BaseAttributes.Strength + u.combatAttrMods.Strength,
Speed: u.BaseAttributes.Speed + u.combatAttrMods.Speed,
Defense: u.BaseAttributes.Defense + u.combatAttrMods.Defense,
Accuracy: u.BaseAttributes.Accuracy + u.combatAttrMods.Accuracy,
Vitality: u.BaseAttributes.Vitality + u.combatAttrMods.Vitality,
Willpower: u.BaseAttributes.Willpower + u.combatAttrMods.Willpower,
Resistance: u.BaseAttributes.Resistance + u.combatAttrMods.Resistance,
}
u.currentAttributes.Speed -= u.Fatigue
u.currentAttributes.Defense -= int(u.Fatigue / 2)
u.currentAttributes.Accuracy -= int(u.Fatigue / 3)
u.currentAttributes.Strength -= int(u.Fatigue / 4)
u.currentAttributes.Willpower -= u.Fatigue
u.currentAttributes.Resistance -= (u.Fatigue - u.currentAttributes.Willpower)
u.BaseStats.MaxHp = u.currentAttributes.Vitality + int(u.currentAttributes.Defense/2)
return u
}
func (u Unit) ModStrength(i int) Unit {
u.combatAttrMods.Strength += i
return u
}
func (u Unit) ModDefense(i int) Unit {
u.combatAttrMods.Defense += i
return u
}
func (u Unit) ModAccuracy(i int) Unit {
u.combatAttrMods.Accuracy += i
return u
}
func (u Unit) ModVitality(i int) Unit {
u.combatAttrMods.Vitality += i
return u
}
func (u Unit) ModSpeed(i int) Unit {
u.combatAttrMods.Speed += i
return u
}
func (u Unit) ModWillpower(i int) Unit {
u.combatAttrMods.Willpower += i
return u
}
func (u Unit) ModResistance(i int) Unit {
u.combatAttrMods.Resistance += i
return u
}
func (u Unit) Speed() int {
return u.Crunch().currentAttributes.Speed
}
func (u Unit) Defense() int {
return u.Crunch().currentAttributes.Defense
}
func (u Unit) Strength() int {
return u.Crunch().currentAttributes.Strength
}
func (u Unit) Accuracy() int {
return u.Crunch().currentAttributes.Accuracy
}
func (u Unit) Vitality() int {
return u.Crunch().currentAttributes.Vitality
}
func (u Unit) Willpower() int {
return u.Crunch().currentAttributes.Willpower
}
func (u Unit) Resistance() int {
return u.Crunch().currentAttributes.Resistance
}
type BaseStats struct {
MaxHp int
}
type Attributes struct {
Strength int
Defense int
Speed int
Accuracy int
Vitality int
Willpower int
Resistance int
}
type EffectType int
const (
PHYS EffectType = iota
MAG
SELF
)
type Attack struct {
Name string
FatigueCost int
PowerMod int
Accuracy int
Targets int
Stat string
EffType EffectType
Team int
}
type AttackMod struct {
PowerMod int
AccMod int
}
type AttackResult struct {
Damange int
Attr string
}
func Play() {
player := createPlayer()
for fight := 1; ; fight++ {
fmt.Println("starting fight", fight)
enemies := genEnemies(fight, 1)
combatants := enemies
combatants = append(combatants, player)
for round := 0; ; round++ {
fmt.Println("round", round)
combatants = playRound(combatants)
for _, c := range combatants {
if c.Name == player.Name {
player = c
break
}
}
if player.Hp <= 0 {
fmt.Println("you ded, try again")
os.Exit(0)
}
if checkOver(combatants) {
read("round over")
break
}
}
}
}
func playRound(combatants []Unit) []Unit {
orderedUnitNames := getPlayOrder(combatants)
for _, name := range orderedUnitNames {
if checkOver(combatants) {
return combatants
}
for _, c := range combatants {
if c.Name == name {
printUnit(c)
combatants = Turn(c, combatants)
break
}
}
}
return combatants
}
func checkOver(cbts []Unit) bool |
type order struct {
Name string
Speed int
}
func getPlayOrder(units []Unit) []string {
var orders []order
for _, u := range units {
orders = append(orders, order{u.Name, u.Speed()})
}
return handleOrders(orders)
}
func handleOrders(orders []order) []string {
ordered := []string{}
for {
if allAccounted(orders, ordered) {
break
}
next := getNext(orders)
ordered = append(ordered, next)
for i, order := range orders {
if order.Name == next {
order.Speed -= 5
orders[i] = order
}
}
}
return ordered
}
func allAccounted(orders []order, list []string) bool {
for _, o := range orders {
if !contains(o.Name, list) {
return false
}
}
return true
}
func getNext(orders []order) string {
sort.Slice(orders, func(i, j int) bool {
return orders[i].Speed > orders[j].Speed
})
return orders[0].Name
}
func printUnit(unit Unit) {
fmt.Printf("Name: %v. Hp: %v/%v. Lvl: %v. Team: %v. Fat: %v. CurAttrs[Str: %v, Def: %v, Spd: %v, Acc: %v, Vit: %v]\r\n", unit.Name, unit.Hp, unit.BaseStats.MaxHp,
unit.AiLevel, unit.Team,
unit.Fatigue, unit.Strength(), unit.Defense(), unit.Speed(), unit.Accuracy(), unit.Vitality())
}
func printAttack(atk Attack) {
fmt.Println("")
fmt.Printf("Chosen Attack: Name: %v. Stat: %v. Pow: %v. Acc: %v. NumTargets: %v. FatCost: %v.\r\n", atk.Name, atk.Stat, atk.PowerMod, atk.Accuracy, atk.Targets, atk.FatigueCost)
fmt.Println("")
}
func createPlayer() Unit {
name := read("name")
strong := readAttr("good")
weak := readAttr("bad")
attrs := Attributes{
5, 5, 5, 5, 5, 5, 5,
}
for _, attr := range validAttrs {
if attr == strong {
switch attr {
case "strength":
attrs.Strength += 5
case "defense":
attrs.Defense += 5
case "speed":
attrs.Speed += 5
case "accuracy":
attrs.Accuracy += 5
case "vitality":
attrs.Vitality += 5
}
}
if attr == weak {
switch attr {
case "strength":
attrs.Strength -= 5
case "defense":
attrs.Defense -= 5
case "speed":
attrs.Speed -= 5
case "accuracy":
attrs.Accuracy -= 5
case "vitality":
attrs.Vitality -= 5
}
}
}
player := CreateUnit(name, attrs)
player.Attacks = getAttacks()
player.IsHuman = true
return player
}
func CreateUnit(name string, attrs Attributes) Unit {
unit := Unit{
Name: name,
BaseAttributes: attrs,
Attacks: []Attack{getAttacks()[0]},
}
unit = unit.Crunch()
unit.Hp = unit.BaseStats.MaxHp
return unit
}
func readAttr(msg string) string {
raw := read(msg)
if !contains(raw, validAttrs) {
fmt.Println("invalid attr")
return readAttr(msg)
}
return raw
}
func contains(target string, strs []string) bool {
for _, s := range strs {
if s == target {
return true
}
}
return false
}
var abs = 0
func genEnemies(i int, team int) []Unit {
units := []Unit{}
count := 1
if i > 5 {
count++
}
if i > 10 {
count++
}
if i > 15 {
count++
}
for x := 0; x < count; x++ {
units = append(units, genUnit(x, i, team))
}
return units
}
var atk = 0
func genUnit(x int, i int, team int) Unit {
potentialAtks := getAttacks()
attrs := Attributes{offset(i), offset(i), offset(i), offset(i), offset(i), offset(i), offset(i)}
unit := CreateUnit(fmt.Sprintf("goblin%v-%v-%v", i, x, abs), attrs)
unit.Team = team
unit.AiLevel = randomInt(0, 10)
if i%3 == 0 && atk < len(potentialAtks) {
unit.Attacks = append(unit.Attacks, potentialAtks[atk])
atk++
}
abs++
return unit
}
func offset(i int) int {
return i + (int(i/2) * randomInt(-1, 1))
}
func Turn(active Unit, units []Unit) []Unit {
if active.Hp <= 0 {
fmt.Println(active.Name, "is dead")
return units
}
atk := PickAttack(active)
printAttack(atk)
var targets []Unit
if atk.EffType == SELF {
targets = []Unit{active}
} else {
targets = PickTargets(atk, active.AiLevel, active.Team, active.IsHuman, units)
}
hitMap := make(map[string][]AttackResult)
for _, target := range targets {
hitMap[target.Name] = append(hitMap[target.Name], resolveAttack(atk, target))
}
for i, unit := range units {
for name, results := range hitMap {
if name == unit.Name {
for _, result := range results {
switch result.Attr {
case "strength":
unit = unit.ModStrength(result.Damange)
case "defense":
unit = unit.ModDefense(result.Damange)
case "speed":
unit = unit.ModSpeed(result.Damange)
case "accuracy":
unit = unit.ModAccuracy(result.Damange)
case "fatigue":
unit.Fatigue += result.Damange
case "hp":
unit.Hp -= result.Damange
if unit.Hp > unit.BaseStats.MaxHp {
unit.Hp = unit.BaseStats.MaxHp
}
case "select":
var attr string
if active.IsHuman {
attr = readAttr("pick stat")
} else {
attr = validAttrs[randomInt(0, len(validAttrs)-1)]
}
result.Attr = attr
switch attr {
case "strength":
unit = unit.ModStrength(result.Damange)
case "defense":
unit = unit.ModDefense(result.Damange)
case "speed":
unit = unit.ModSpeed(result.Damange)
case "accuracy":
unit = unit.ModAccuracy(result.Damange)
case "vitality":
unit = unit.ModVitality(result.Damange)
}
}
if result.Attr == "miss" {
fmt.Printf("%v attack %v missed %v\r\n", active.Name, atk.Name, unit.Name)
} else {
fmt.Printf("%v attack %v hit %v and dealt %v to %v\r\n", active.Name, atk.Name, unit.Name, result.Damange, result.Attr)
if unit.Hp <= 0 {
fmt.Println(active.Name, "killed", unit.Name)
}
}
}
}
}
units[i] = unit
}
activeIndex := -1
for i, unit := range units {
if unit.Name == active.Name {
active = unit
activeIndex = i
break
}
}
active.Fatigue += atk.FatigueCost
active = active.Crunch()
units[activeIndex] = active
return units
}
func resolveAttack(attack Attack, unit Unit) AttackResult {
//TODO: support magic attacks
if attack.Team == unit.Team {
return AttackResult{
Attr: attack.Stat,
Damange: attack.PowerMod,
}
}
if attack.EffType == PHYS {
if attack.Accuracy < unit.Speed() {
return AttackResult{
Attr: "miss",
}
}
dmg := attack.PowerMod - unit.Defense()
if dmg < 0 {
dmg = 0
}
return AttackResult{
Attr: attack.Stat,
Damange: dmg,
}
}
if attack.Accuracy < unit.Willpower() {
return AttackResult{
Attr: "miss",
}
}
dmg := attack.PowerMod - unit.Resistance()
if dmg < 0 {
dmg = 0
}
return AttackResult{
Attr: attack.Stat,
Damange: dmg,
}
}
func PickTargets(atk Attack, lvl int, team int, human bool, units []Unit) []Unit {
if human {
fmt.Println("")
for _, unit := range units {
printUnit(unit)
}
fmt.Println("")
return pickPlayerTargets(atk.Targets, units)
}
return npcPickTargets(atk, lvl, team, units)
}
func PickAttack(unit Unit) Attack {
var atk Attack
if unit.IsHuman {
atk = pickPlayerAttack(unit.Attacks)
} else {
atk = npcPickAttack(unit)
}
atk.Team = unit.Team
//TODO: phys vs mag
if atk.EffType == PHYS {
atk.PowerMod += unit.Strength()
atk.Accuracy += unit.Accuracy()
} else {
atk.PowerMod += unit.Willpower()
atk.Accuracy += int((unit.Willpower() + unit.Resistance()) / 2)
}
return atk
}
func pickPlayerTargets(num int, units []Unit) []Unit {
var targets []Unit
for i := 0; i < num; i++ {
fmt.Println("Target", i)
targets = append(targets, selectPlayerTarget(units))
}
return targets
}
func npcPickAttack(unit Unit) Attack {
if unit.AiLevel < 3 {
return unit.Attacks[randomInt(0, len(unit.Attacks)-1)]
}
if unit.Fatigue > 4 {
for _, atk := range unit.Attacks {
if atk.Name == "rest" {
return atk
}
}
}
var basic Attack
for _, atk := range unit.Attacks {
if atk.Name == "big" && unit.Strength() < unit.Accuracy() {
return atk
}
if atk.Name == "small" && unit.Accuracy() < unit.Strength() {
return atk
}
if atk.Name == "basic" {
basic = atk
}
}
return basic
}
func npcPickTargets(atk Attack, lvl int, team int, units []Unit) []Unit {
targets := []Unit{}
for i := 0; i < atk.Targets; i++ {
if lvl < 2 {
randUnit := randomInt(0, len(units)-1)
targets = append(targets, units[randUnit])
} else {
targets = append(targets, findFirstValidTarget(atk.PowerMod, team, units))
}
}
return targets
}
func findFirstValidTarget(power int, team int, units []Unit) Unit {
for _, unit := range units {
if power > 0 {
if unit.Team != team {
return unit
}
} else if unit.Team == team {
return unit
}
}
if len(units) < 1 {
return Unit{}
}
return units[0]
}
func selectPlayerTarget(units []Unit) Unit {
chosen := read("pick target")
target := Unit{}
for _, unit := range units {
if unit.Name == chosen {
target = unit
}
}
if target.Name == "" {
fmt.Println("invalid target")
return selectPlayerTarget(units)
}
return target
}
func pickPlayerAttack(atks []Attack) Attack {
atkStr := read("choose attack")
var atk Attack
for _, ak := range atks {
if ak.Name == atkStr {
atk = ak
}
}
if atk.Name == "" {
fmt.Println("Ivalid attack")
return pickPlayerAttack(atks)
}
return atk
}
func randomInt(min, max int) int {
max++
return min + rand.Intn(max-min)
}
func read(msg string) string {
reader := bufio.NewReader(os.Stdin)
fmt.Println(msg)
text, _ := reader.ReadString('\n')
text = strings.Replace(text, "\n", "", -1)
text = strings.Replace(text, "\r", "", -1)
return text
}
func readAsInt(msg string) int {
raw := read(msg)
i, err := strconv.Atoi(raw)
if err != nil {
readAsInt("invalid number, try again")
}
return i
}
| {
teams := make(map[int]bool)
for _, cbt := range cbts {
if cbt.Hp > 0 {
teams[cbt.Team] = true
}
}
if len(teams) < 2 {
return true
}
return false
} | identifier_body |
main.go | package main
import (
"bufio"
"fmt"
"math/rand"
"os"
"sort"
"strconv"
"strings"
"time"
)
var validAttrs = []string{
"strength", "defense", "speed", "accuracy", "vitality", "resistance", "willpower",
}
func main() {
rand.Seed(time.Now().UnixNano())
Play()
}
type Unit struct {
Name string
Type string
BaseAttributes Attributes
combatAttrMods Attributes
currentAttributes Attributes
BaseStats BaseStats
Hp int
Fatigue int
IsHuman bool
Attacks []Attack
Team int
AiLevel int
}
func (u Unit) Crunch() Unit {
u.currentAttributes = Attributes{
Strength: u.BaseAttributes.Strength + u.combatAttrMods.Strength,
Speed: u.BaseAttributes.Speed + u.combatAttrMods.Speed,
Defense: u.BaseAttributes.Defense + u.combatAttrMods.Defense,
Accuracy: u.BaseAttributes.Accuracy + u.combatAttrMods.Accuracy,
Vitality: u.BaseAttributes.Vitality + u.combatAttrMods.Vitality,
Willpower: u.BaseAttributes.Willpower + u.combatAttrMods.Willpower,
Resistance: u.BaseAttributes.Resistance + u.combatAttrMods.Resistance,
}
u.currentAttributes.Speed -= u.Fatigue
u.currentAttributes.Defense -= int(u.Fatigue / 2)
u.currentAttributes.Accuracy -= int(u.Fatigue / 3)
u.currentAttributes.Strength -= int(u.Fatigue / 4)
u.currentAttributes.Willpower -= u.Fatigue
u.currentAttributes.Resistance -= (u.Fatigue - u.currentAttributes.Willpower)
u.BaseStats.MaxHp = u.currentAttributes.Vitality + int(u.currentAttributes.Defense/2)
return u
}
func (u Unit) ModStrength(i int) Unit {
u.combatAttrMods.Strength += i
return u
}
func (u Unit) ModDefense(i int) Unit {
u.combatAttrMods.Defense += i
return u
}
func (u Unit) | (i int) Unit {
u.combatAttrMods.Accuracy += i
return u
}
func (u Unit) ModVitality(i int) Unit {
u.combatAttrMods.Vitality += i
return u
}
func (u Unit) ModSpeed(i int) Unit {
u.combatAttrMods.Speed += i
return u
}
func (u Unit) ModWillpower(i int) Unit {
u.combatAttrMods.Willpower += i
return u
}
func (u Unit) ModResistance(i int) Unit {
u.combatAttrMods.Resistance += i
return u
}
func (u Unit) Speed() int {
return u.Crunch().currentAttributes.Speed
}
func (u Unit) Defense() int {
return u.Crunch().currentAttributes.Defense
}
func (u Unit) Strength() int {
return u.Crunch().currentAttributes.Strength
}
func (u Unit) Accuracy() int {
return u.Crunch().currentAttributes.Accuracy
}
func (u Unit) Vitality() int {
return u.Crunch().currentAttributes.Vitality
}
func (u Unit) Willpower() int {
return u.Crunch().currentAttributes.Willpower
}
func (u Unit) Resistance() int {
return u.Crunch().currentAttributes.Resistance
}
type BaseStats struct {
MaxHp int
}
type Attributes struct {
Strength int
Defense int
Speed int
Accuracy int
Vitality int
Willpower int
Resistance int
}
type EffectType int
const (
PHYS EffectType = iota
MAG
SELF
)
type Attack struct {
Name string
FatigueCost int
PowerMod int
Accuracy int
Targets int
Stat string
EffType EffectType
Team int
}
type AttackMod struct {
PowerMod int
AccMod int
}
type AttackResult struct {
Damange int
Attr string
}
func Play() {
player := createPlayer()
for fight := 1; ; fight++ {
fmt.Println("starting fight", fight)
enemies := genEnemies(fight, 1)
combatants := enemies
combatants = append(combatants, player)
for round := 0; ; round++ {
fmt.Println("round", round)
combatants = playRound(combatants)
for _, c := range combatants {
if c.Name == player.Name {
player = c
break
}
}
if player.Hp <= 0 {
fmt.Println("you ded, try again")
os.Exit(0)
}
if checkOver(combatants) {
read("round over")
break
}
}
}
}
func playRound(combatants []Unit) []Unit {
orderedUnitNames := getPlayOrder(combatants)
for _, name := range orderedUnitNames {
if checkOver(combatants) {
return combatants
}
for _, c := range combatants {
if c.Name == name {
printUnit(c)
combatants = Turn(c, combatants)
break
}
}
}
return combatants
}
func checkOver(cbts []Unit) bool {
teams := make(map[int]bool)
for _, cbt := range cbts {
if cbt.Hp > 0 {
teams[cbt.Team] = true
}
}
if len(teams) < 2 {
return true
}
return false
}
type order struct {
Name string
Speed int
}
func getPlayOrder(units []Unit) []string {
var orders []order
for _, u := range units {
orders = append(orders, order{u.Name, u.Speed()})
}
return handleOrders(orders)
}
func handleOrders(orders []order) []string {
ordered := []string{}
for {
if allAccounted(orders, ordered) {
break
}
next := getNext(orders)
ordered = append(ordered, next)
for i, order := range orders {
if order.Name == next {
order.Speed -= 5
orders[i] = order
}
}
}
return ordered
}
func allAccounted(orders []order, list []string) bool {
for _, o := range orders {
if !contains(o.Name, list) {
return false
}
}
return true
}
func getNext(orders []order) string {
sort.Slice(orders, func(i, j int) bool {
return orders[i].Speed > orders[j].Speed
})
return orders[0].Name
}
func printUnit(unit Unit) {
fmt.Printf("Name: %v. Hp: %v/%v. Lvl: %v. Team: %v. Fat: %v. CurAttrs[Str: %v, Def: %v, Spd: %v, Acc: %v, Vit: %v]\r\n", unit.Name, unit.Hp, unit.BaseStats.MaxHp,
unit.AiLevel, unit.Team,
unit.Fatigue, unit.Strength(), unit.Defense(), unit.Speed(), unit.Accuracy(), unit.Vitality())
}
func printAttack(atk Attack) {
fmt.Println("")
fmt.Printf("Chosen Attack: Name: %v. Stat: %v. Pow: %v. Acc: %v. NumTargets: %v. FatCost: %v.\r\n", atk.Name, atk.Stat, atk.PowerMod, atk.Accuracy, atk.Targets, atk.FatigueCost)
fmt.Println("")
}
func createPlayer() Unit {
name := read("name")
strong := readAttr("good")
weak := readAttr("bad")
attrs := Attributes{
5, 5, 5, 5, 5, 5, 5,
}
for _, attr := range validAttrs {
if attr == strong {
switch attr {
case "strength":
attrs.Strength += 5
case "defense":
attrs.Defense += 5
case "speed":
attrs.Speed += 5
case "accuracy":
attrs.Accuracy += 5
case "vitality":
attrs.Vitality += 5
}
}
if attr == weak {
switch attr {
case "strength":
attrs.Strength -= 5
case "defense":
attrs.Defense -= 5
case "speed":
attrs.Speed -= 5
case "accuracy":
attrs.Accuracy -= 5
case "vitality":
attrs.Vitality -= 5
}
}
}
player := CreateUnit(name, attrs)
player.Attacks = getAttacks()
player.IsHuman = true
return player
}
func CreateUnit(name string, attrs Attributes) Unit {
unit := Unit{
Name: name,
BaseAttributes: attrs,
Attacks: []Attack{getAttacks()[0]},
}
unit = unit.Crunch()
unit.Hp = unit.BaseStats.MaxHp
return unit
}
func readAttr(msg string) string {
raw := read(msg)
if !contains(raw, validAttrs) {
fmt.Println("invalid attr")
return readAttr(msg)
}
return raw
}
func contains(target string, strs []string) bool {
for _, s := range strs {
if s == target {
return true
}
}
return false
}
var abs = 0
func genEnemies(i int, team int) []Unit {
units := []Unit{}
count := 1
if i > 5 {
count++
}
if i > 10 {
count++
}
if i > 15 {
count++
}
for x := 0; x < count; x++ {
units = append(units, genUnit(x, i, team))
}
return units
}
var atk = 0
func genUnit(x int, i int, team int) Unit {
potentialAtks := getAttacks()
attrs := Attributes{offset(i), offset(i), offset(i), offset(i), offset(i), offset(i), offset(i)}
unit := CreateUnit(fmt.Sprintf("goblin%v-%v-%v", i, x, abs), attrs)
unit.Team = team
unit.AiLevel = randomInt(0, 10)
if i%3 == 0 && atk < len(potentialAtks) {
unit.Attacks = append(unit.Attacks, potentialAtks[atk])
atk++
}
abs++
return unit
}
func offset(i int) int {
return i + (int(i/2) * randomInt(-1, 1))
}
func Turn(active Unit, units []Unit) []Unit {
if active.Hp <= 0 {
fmt.Println(active.Name, "is dead")
return units
}
atk := PickAttack(active)
printAttack(atk)
var targets []Unit
if atk.EffType == SELF {
targets = []Unit{active}
} else {
targets = PickTargets(atk, active.AiLevel, active.Team, active.IsHuman, units)
}
hitMap := make(map[string][]AttackResult)
for _, target := range targets {
hitMap[target.Name] = append(hitMap[target.Name], resolveAttack(atk, target))
}
for i, unit := range units {
for name, results := range hitMap {
if name == unit.Name {
for _, result := range results {
switch result.Attr {
case "strength":
unit = unit.ModStrength(result.Damange)
case "defense":
unit = unit.ModDefense(result.Damange)
case "speed":
unit = unit.ModSpeed(result.Damange)
case "accuracy":
unit = unit.ModAccuracy(result.Damange)
case "fatigue":
unit.Fatigue += result.Damange
case "hp":
unit.Hp -= result.Damange
if unit.Hp > unit.BaseStats.MaxHp {
unit.Hp = unit.BaseStats.MaxHp
}
case "select":
var attr string
if active.IsHuman {
attr = readAttr("pick stat")
} else {
attr = validAttrs[randomInt(0, len(validAttrs)-1)]
}
result.Attr = attr
switch attr {
case "strength":
unit = unit.ModStrength(result.Damange)
case "defense":
unit = unit.ModDefense(result.Damange)
case "speed":
unit = unit.ModSpeed(result.Damange)
case "accuracy":
unit = unit.ModAccuracy(result.Damange)
case "vitality":
unit = unit.ModVitality(result.Damange)
}
}
if result.Attr == "miss" {
fmt.Printf("%v attack %v missed %v\r\n", active.Name, atk.Name, unit.Name)
} else {
fmt.Printf("%v attack %v hit %v and dealt %v to %v\r\n", active.Name, atk.Name, unit.Name, result.Damange, result.Attr)
if unit.Hp <= 0 {
fmt.Println(active.Name, "killed", unit.Name)
}
}
}
}
}
units[i] = unit
}
activeIndex := -1
for i, unit := range units {
if unit.Name == active.Name {
active = unit
activeIndex = i
break
}
}
active.Fatigue += atk.FatigueCost
active = active.Crunch()
units[activeIndex] = active
return units
}
func resolveAttack(attack Attack, unit Unit) AttackResult {
//TODO: support magic attacks
if attack.Team == unit.Team {
return AttackResult{
Attr: attack.Stat,
Damange: attack.PowerMod,
}
}
if attack.EffType == PHYS {
if attack.Accuracy < unit.Speed() {
return AttackResult{
Attr: "miss",
}
}
dmg := attack.PowerMod - unit.Defense()
if dmg < 0 {
dmg = 0
}
return AttackResult{
Attr: attack.Stat,
Damange: dmg,
}
}
if attack.Accuracy < unit.Willpower() {
return AttackResult{
Attr: "miss",
}
}
dmg := attack.PowerMod - unit.Resistance()
if dmg < 0 {
dmg = 0
}
return AttackResult{
Attr: attack.Stat,
Damange: dmg,
}
}
func PickTargets(atk Attack, lvl int, team int, human bool, units []Unit) []Unit {
if human {
fmt.Println("")
for _, unit := range units {
printUnit(unit)
}
fmt.Println("")
return pickPlayerTargets(atk.Targets, units)
}
return npcPickTargets(atk, lvl, team, units)
}
func PickAttack(unit Unit) Attack {
var atk Attack
if unit.IsHuman {
atk = pickPlayerAttack(unit.Attacks)
} else {
atk = npcPickAttack(unit)
}
atk.Team = unit.Team
//TODO: phys vs mag
if atk.EffType == PHYS {
atk.PowerMod += unit.Strength()
atk.Accuracy += unit.Accuracy()
} else {
atk.PowerMod += unit.Willpower()
atk.Accuracy += int((unit.Willpower() + unit.Resistance()) / 2)
}
return atk
}
func pickPlayerTargets(num int, units []Unit) []Unit {
var targets []Unit
for i := 0; i < num; i++ {
fmt.Println("Target", i)
targets = append(targets, selectPlayerTarget(units))
}
return targets
}
func npcPickAttack(unit Unit) Attack {
if unit.AiLevel < 3 {
return unit.Attacks[randomInt(0, len(unit.Attacks)-1)]
}
if unit.Fatigue > 4 {
for _, atk := range unit.Attacks {
if atk.Name == "rest" {
return atk
}
}
}
var basic Attack
for _, atk := range unit.Attacks {
if atk.Name == "big" && unit.Strength() < unit.Accuracy() {
return atk
}
if atk.Name == "small" && unit.Accuracy() < unit.Strength() {
return atk
}
if atk.Name == "basic" {
basic = atk
}
}
return basic
}
func npcPickTargets(atk Attack, lvl int, team int, units []Unit) []Unit {
targets := []Unit{}
for i := 0; i < atk.Targets; i++ {
if lvl < 2 {
randUnit := randomInt(0, len(units)-1)
targets = append(targets, units[randUnit])
} else {
targets = append(targets, findFirstValidTarget(atk.PowerMod, team, units))
}
}
return targets
}
func findFirstValidTarget(power int, team int, units []Unit) Unit {
for _, unit := range units {
if power > 0 {
if unit.Team != team {
return unit
}
} else if unit.Team == team {
return unit
}
}
if len(units) < 1 {
return Unit{}
}
return units[0]
}
func selectPlayerTarget(units []Unit) Unit {
chosen := read("pick target")
target := Unit{}
for _, unit := range units {
if unit.Name == chosen {
target = unit
}
}
if target.Name == "" {
fmt.Println("invalid target")
return selectPlayerTarget(units)
}
return target
}
func pickPlayerAttack(atks []Attack) Attack {
atkStr := read("choose attack")
var atk Attack
for _, ak := range atks {
if ak.Name == atkStr {
atk = ak
}
}
if atk.Name == "" {
fmt.Println("Ivalid attack")
return pickPlayerAttack(atks)
}
return atk
}
func randomInt(min, max int) int {
max++
return min + rand.Intn(max-min)
}
func read(msg string) string {
reader := bufio.NewReader(os.Stdin)
fmt.Println(msg)
text, _ := reader.ReadString('\n')
text = strings.Replace(text, "\n", "", -1)
text = strings.Replace(text, "\r", "", -1)
return text
}
func readAsInt(msg string) int {
raw := read(msg)
i, err := strconv.Atoi(raw)
if err != nil {
readAsInt("invalid number, try again")
}
return i
}
| ModAccuracy | identifier_name |
main.go | package main
import (
"bufio"
"fmt"
"math/rand"
"os"
"sort"
"strconv"
"strings"
"time"
)
var validAttrs = []string{
"strength", "defense", "speed", "accuracy", "vitality", "resistance", "willpower",
}
func main() {
rand.Seed(time.Now().UnixNano())
Play()
}
type Unit struct {
Name string
Type string
BaseAttributes Attributes
combatAttrMods Attributes
currentAttributes Attributes
BaseStats BaseStats
Hp int
Fatigue int
IsHuman bool
Attacks []Attack
Team int
AiLevel int
}
func (u Unit) Crunch() Unit {
u.currentAttributes = Attributes{
Strength: u.BaseAttributes.Strength + u.combatAttrMods.Strength,
Speed: u.BaseAttributes.Speed + u.combatAttrMods.Speed,
Defense: u.BaseAttributes.Defense + u.combatAttrMods.Defense,
Accuracy: u.BaseAttributes.Accuracy + u.combatAttrMods.Accuracy,
Vitality: u.BaseAttributes.Vitality + u.combatAttrMods.Vitality,
Willpower: u.BaseAttributes.Willpower + u.combatAttrMods.Willpower,
Resistance: u.BaseAttributes.Resistance + u.combatAttrMods.Resistance,
}
u.currentAttributes.Speed -= u.Fatigue
u.currentAttributes.Defense -= int(u.Fatigue / 2)
u.currentAttributes.Accuracy -= int(u.Fatigue / 3)
u.currentAttributes.Strength -= int(u.Fatigue / 4)
u.currentAttributes.Willpower -= u.Fatigue
u.currentAttributes.Resistance -= (u.Fatigue - u.currentAttributes.Willpower)
u.BaseStats.MaxHp = u.currentAttributes.Vitality + int(u.currentAttributes.Defense/2)
return u
}
func (u Unit) ModStrength(i int) Unit {
u.combatAttrMods.Strength += i
return u
}
func (u Unit) ModDefense(i int) Unit {
u.combatAttrMods.Defense += i
return u
}
func (u Unit) ModAccuracy(i int) Unit {
u.combatAttrMods.Accuracy += i
return u
}
func (u Unit) ModVitality(i int) Unit {
u.combatAttrMods.Vitality += i
return u
}
func (u Unit) ModSpeed(i int) Unit {
u.combatAttrMods.Speed += i
return u
}
func (u Unit) ModWillpower(i int) Unit {
u.combatAttrMods.Willpower += i
return u
}
func (u Unit) ModResistance(i int) Unit {
u.combatAttrMods.Resistance += i
return u
}
func (u Unit) Speed() int {
return u.Crunch().currentAttributes.Speed
}
func (u Unit) Defense() int {
return u.Crunch().currentAttributes.Defense
}
func (u Unit) Strength() int {
return u.Crunch().currentAttributes.Strength
}
func (u Unit) Accuracy() int {
return u.Crunch().currentAttributes.Accuracy
}
func (u Unit) Vitality() int {
return u.Crunch().currentAttributes.Vitality
}
func (u Unit) Willpower() int {
return u.Crunch().currentAttributes.Willpower
}
func (u Unit) Resistance() int {
return u.Crunch().currentAttributes.Resistance
}
type BaseStats struct {
MaxHp int
}
type Attributes struct {
Strength int
Defense int
Speed int
Accuracy int
Vitality int
Willpower int
Resistance int
}
type EffectType int
const (
PHYS EffectType = iota
MAG
SELF
)
type Attack struct {
Name string
FatigueCost int
PowerMod int
Accuracy int
Targets int
Stat string
EffType EffectType
Team int
}
type AttackMod struct {
PowerMod int
AccMod int
}
type AttackResult struct {
Damange int
Attr string
}
func Play() {
player := createPlayer()
for fight := 1; ; fight++ {
fmt.Println("starting fight", fight)
enemies := genEnemies(fight, 1)
combatants := enemies
combatants = append(combatants, player)
for round := 0; ; round++ {
fmt.Println("round", round)
combatants = playRound(combatants)
for _, c := range combatants {
if c.Name == player.Name {
player = c
break
}
}
if player.Hp <= 0 {
fmt.Println("you ded, try again")
os.Exit(0)
}
if checkOver(combatants) {
read("round over")
break
}
}
}
}
func playRound(combatants []Unit) []Unit {
orderedUnitNames := getPlayOrder(combatants)
for _, name := range orderedUnitNames {
if checkOver(combatants) {
return combatants
}
for _, c := range combatants {
if c.Name == name {
printUnit(c)
combatants = Turn(c, combatants)
break
}
}
}
return combatants
}
func checkOver(cbts []Unit) bool {
teams := make(map[int]bool)
for _, cbt := range cbts {
if cbt.Hp > 0 {
teams[cbt.Team] = true
}
}
if len(teams) < 2 {
return true
}
return false
}
type order struct {
Name string
Speed int
}
func getPlayOrder(units []Unit) []string {
var orders []order
for _, u := range units {
orders = append(orders, order{u.Name, u.Speed()})
}
return handleOrders(orders)
}
func handleOrders(orders []order) []string {
ordered := []string{}
for {
if allAccounted(orders, ordered) {
break
}
next := getNext(orders)
ordered = append(ordered, next)
for i, order := range orders {
if order.Name == next {
order.Speed -= 5
orders[i] = order
}
}
}
return ordered
}
func allAccounted(orders []order, list []string) bool {
for _, o := range orders {
if !contains(o.Name, list) {
return false
}
}
return true
}
func getNext(orders []order) string {
sort.Slice(orders, func(i, j int) bool {
return orders[i].Speed > orders[j].Speed
})
return orders[0].Name
}
func printUnit(unit Unit) {
fmt.Printf("Name: %v. Hp: %v/%v. Lvl: %v. Team: %v. Fat: %v. CurAttrs[Str: %v, Def: %v, Spd: %v, Acc: %v, Vit: %v]\r\n", unit.Name, unit.Hp, unit.BaseStats.MaxHp,
unit.AiLevel, unit.Team,
unit.Fatigue, unit.Strength(), unit.Defense(), unit.Speed(), unit.Accuracy(), unit.Vitality())
}
func printAttack(atk Attack) {
fmt.Println("")
fmt.Printf("Chosen Attack: Name: %v. Stat: %v. Pow: %v. Acc: %v. NumTargets: %v. FatCost: %v.\r\n", atk.Name, atk.Stat, atk.PowerMod, atk.Accuracy, atk.Targets, atk.FatigueCost)
fmt.Println("")
}
func createPlayer() Unit {
name := read("name")
strong := readAttr("good")
weak := readAttr("bad")
attrs := Attributes{
5, 5, 5, 5, 5, 5, 5,
}
for _, attr := range validAttrs {
if attr == strong {
switch attr {
case "strength":
attrs.Strength += 5
case "defense":
attrs.Defense += 5
case "speed":
attrs.Speed += 5
case "accuracy":
attrs.Accuracy += 5
case "vitality":
attrs.Vitality += 5
}
}
if attr == weak {
switch attr {
case "strength":
attrs.Strength -= 5
case "defense":
attrs.Defense -= 5
case "speed":
attrs.Speed -= 5
case "accuracy":
attrs.Accuracy -= 5
case "vitality":
attrs.Vitality -= 5
}
}
}
player := CreateUnit(name, attrs)
player.Attacks = getAttacks()
player.IsHuman = true
return player
}
func CreateUnit(name string, attrs Attributes) Unit {
unit := Unit{
Name: name,
BaseAttributes: attrs,
Attacks: []Attack{getAttacks()[0]},
}
unit = unit.Crunch()
unit.Hp = unit.BaseStats.MaxHp
return unit
}
func readAttr(msg string) string {
raw := read(msg)
if !contains(raw, validAttrs) {
fmt.Println("invalid attr")
return readAttr(msg)
}
return raw
}
func contains(target string, strs []string) bool {
for _, s := range strs {
if s == target {
return true
}
}
return false
}
var abs = 0
func genEnemies(i int, team int) []Unit {
units := []Unit{}
count := 1
if i > 5 {
count++
}
if i > 10 {
count++
}
if i > 15 {
count++
}
for x := 0; x < count; x++ {
units = append(units, genUnit(x, i, team))
}
return units
}
var atk = 0
func genUnit(x int, i int, team int) Unit {
potentialAtks := getAttacks()
attrs := Attributes{offset(i), offset(i), offset(i), offset(i), offset(i), offset(i), offset(i)}
unit := CreateUnit(fmt.Sprintf("goblin%v-%v-%v", i, x, abs), attrs)
unit.Team = team
unit.AiLevel = randomInt(0, 10)
if i%3 == 0 && atk < len(potentialAtks) {
unit.Attacks = append(unit.Attacks, potentialAtks[atk])
atk++
}
abs++
return unit
}
func offset(i int) int {
return i + (int(i/2) * randomInt(-1, 1))
}
func Turn(active Unit, units []Unit) []Unit {
if active.Hp <= 0 {
fmt.Println(active.Name, "is dead")
return units
}
atk := PickAttack(active)
printAttack(atk)
var targets []Unit
if atk.EffType == SELF {
targets = []Unit{active}
} else {
targets = PickTargets(atk, active.AiLevel, active.Team, active.IsHuman, units)
}
hitMap := make(map[string][]AttackResult)
for _, target := range targets {
hitMap[target.Name] = append(hitMap[target.Name], resolveAttack(atk, target))
}
for i, unit := range units {
for name, results := range hitMap {
if name == unit.Name {
for _, result := range results |
}
}
units[i] = unit
}
activeIndex := -1
for i, unit := range units {
if unit.Name == active.Name {
active = unit
activeIndex = i
break
}
}
active.Fatigue += atk.FatigueCost
active = active.Crunch()
units[activeIndex] = active
return units
}
func resolveAttack(attack Attack, unit Unit) AttackResult {
//TODO: support magic attacks
if attack.Team == unit.Team {
return AttackResult{
Attr: attack.Stat,
Damange: attack.PowerMod,
}
}
if attack.EffType == PHYS {
if attack.Accuracy < unit.Speed() {
return AttackResult{
Attr: "miss",
}
}
dmg := attack.PowerMod - unit.Defense()
if dmg < 0 {
dmg = 0
}
return AttackResult{
Attr: attack.Stat,
Damange: dmg,
}
}
if attack.Accuracy < unit.Willpower() {
return AttackResult{
Attr: "miss",
}
}
dmg := attack.PowerMod - unit.Resistance()
if dmg < 0 {
dmg = 0
}
return AttackResult{
Attr: attack.Stat,
Damange: dmg,
}
}
func PickTargets(atk Attack, lvl int, team int, human bool, units []Unit) []Unit {
if human {
fmt.Println("")
for _, unit := range units {
printUnit(unit)
}
fmt.Println("")
return pickPlayerTargets(atk.Targets, units)
}
return npcPickTargets(atk, lvl, team, units)
}
func PickAttack(unit Unit) Attack {
var atk Attack
if unit.IsHuman {
atk = pickPlayerAttack(unit.Attacks)
} else {
atk = npcPickAttack(unit)
}
atk.Team = unit.Team
//TODO: phys vs mag
if atk.EffType == PHYS {
atk.PowerMod += unit.Strength()
atk.Accuracy += unit.Accuracy()
} else {
atk.PowerMod += unit.Willpower()
atk.Accuracy += int((unit.Willpower() + unit.Resistance()) / 2)
}
return atk
}
func pickPlayerTargets(num int, units []Unit) []Unit {
var targets []Unit
for i := 0; i < num; i++ {
fmt.Println("Target", i)
targets = append(targets, selectPlayerTarget(units))
}
return targets
}
func npcPickAttack(unit Unit) Attack {
if unit.AiLevel < 3 {
return unit.Attacks[randomInt(0, len(unit.Attacks)-1)]
}
if unit.Fatigue > 4 {
for _, atk := range unit.Attacks {
if atk.Name == "rest" {
return atk
}
}
}
var basic Attack
for _, atk := range unit.Attacks {
if atk.Name == "big" && unit.Strength() < unit.Accuracy() {
return atk
}
if atk.Name == "small" && unit.Accuracy() < unit.Strength() {
return atk
}
if atk.Name == "basic" {
basic = atk
}
}
return basic
}
func npcPickTargets(atk Attack, lvl int, team int, units []Unit) []Unit {
targets := []Unit{}
for i := 0; i < atk.Targets; i++ {
if lvl < 2 {
randUnit := randomInt(0, len(units)-1)
targets = append(targets, units[randUnit])
} else {
targets = append(targets, findFirstValidTarget(atk.PowerMod, team, units))
}
}
return targets
}
func findFirstValidTarget(power int, team int, units []Unit) Unit {
for _, unit := range units {
if power > 0 {
if unit.Team != team {
return unit
}
} else if unit.Team == team {
return unit
}
}
if len(units) < 1 {
return Unit{}
}
return units[0]
}
func selectPlayerTarget(units []Unit) Unit {
chosen := read("pick target")
target := Unit{}
for _, unit := range units {
if unit.Name == chosen {
target = unit
}
}
if target.Name == "" {
fmt.Println("invalid target")
return selectPlayerTarget(units)
}
return target
}
func pickPlayerAttack(atks []Attack) Attack {
atkStr := read("choose attack")
var atk Attack
for _, ak := range atks {
if ak.Name == atkStr {
atk = ak
}
}
if atk.Name == "" {
fmt.Println("Ivalid attack")
return pickPlayerAttack(atks)
}
return atk
}
func randomInt(min, max int) int {
max++
return min + rand.Intn(max-min)
}
func read(msg string) string {
reader := bufio.NewReader(os.Stdin)
fmt.Println(msg)
text, _ := reader.ReadString('\n')
text = strings.Replace(text, "\n", "", -1)
text = strings.Replace(text, "\r", "", -1)
return text
}
func readAsInt(msg string) int {
raw := read(msg)
i, err := strconv.Atoi(raw)
if err != nil {
readAsInt("invalid number, try again")
}
return i
}
| {
switch result.Attr {
case "strength":
unit = unit.ModStrength(result.Damange)
case "defense":
unit = unit.ModDefense(result.Damange)
case "speed":
unit = unit.ModSpeed(result.Damange)
case "accuracy":
unit = unit.ModAccuracy(result.Damange)
case "fatigue":
unit.Fatigue += result.Damange
case "hp":
unit.Hp -= result.Damange
if unit.Hp > unit.BaseStats.MaxHp {
unit.Hp = unit.BaseStats.MaxHp
}
case "select":
var attr string
if active.IsHuman {
attr = readAttr("pick stat")
} else {
attr = validAttrs[randomInt(0, len(validAttrs)-1)]
}
result.Attr = attr
switch attr {
case "strength":
unit = unit.ModStrength(result.Damange)
case "defense":
unit = unit.ModDefense(result.Damange)
case "speed":
unit = unit.ModSpeed(result.Damange)
case "accuracy":
unit = unit.ModAccuracy(result.Damange)
case "vitality":
unit = unit.ModVitality(result.Damange)
}
}
if result.Attr == "miss" {
fmt.Printf("%v attack %v missed %v\r\n", active.Name, atk.Name, unit.Name)
} else {
fmt.Printf("%v attack %v hit %v and dealt %v to %v\r\n", active.Name, atk.Name, unit.Name, result.Damange, result.Attr)
if unit.Hp <= 0 {
fmt.Println(active.Name, "killed", unit.Name)
}
}
} | conditional_block |
main.go | package main
import (
"bufio"
"fmt"
"math/rand"
"os"
"sort"
"strconv"
"strings"
"time"
)
var validAttrs = []string{
"strength", "defense", "speed", "accuracy", "vitality", "resistance", "willpower",
}
func main() {
rand.Seed(time.Now().UnixNano())
Play()
}
type Unit struct {
Name string
Type string
BaseAttributes Attributes
combatAttrMods Attributes
currentAttributes Attributes
BaseStats BaseStats
Hp int
Fatigue int
IsHuman bool
Attacks []Attack
Team int
AiLevel int
}
func (u Unit) Crunch() Unit {
u.currentAttributes = Attributes{
Strength: u.BaseAttributes.Strength + u.combatAttrMods.Strength,
Speed: u.BaseAttributes.Speed + u.combatAttrMods.Speed,
Defense: u.BaseAttributes.Defense + u.combatAttrMods.Defense,
Accuracy: u.BaseAttributes.Accuracy + u.combatAttrMods.Accuracy,
Vitality: u.BaseAttributes.Vitality + u.combatAttrMods.Vitality,
Willpower: u.BaseAttributes.Willpower + u.combatAttrMods.Willpower,
Resistance: u.BaseAttributes.Resistance + u.combatAttrMods.Resistance,
}
u.currentAttributes.Speed -= u.Fatigue
u.currentAttributes.Defense -= int(u.Fatigue / 2)
u.currentAttributes.Accuracy -= int(u.Fatigue / 3)
u.currentAttributes.Strength -= int(u.Fatigue / 4)
u.currentAttributes.Willpower -= u.Fatigue
u.currentAttributes.Resistance -= (u.Fatigue - u.currentAttributes.Willpower)
u.BaseStats.MaxHp = u.currentAttributes.Vitality + int(u.currentAttributes.Defense/2)
return u
}
func (u Unit) ModStrength(i int) Unit {
u.combatAttrMods.Strength += i
return u
}
func (u Unit) ModDefense(i int) Unit {
u.combatAttrMods.Defense += i
return u
}
func (u Unit) ModAccuracy(i int) Unit {
u.combatAttrMods.Accuracy += i
return u
}
func (u Unit) ModVitality(i int) Unit {
u.combatAttrMods.Vitality += i
return u
}
func (u Unit) ModSpeed(i int) Unit {
u.combatAttrMods.Speed += i
return u
}
func (u Unit) ModWillpower(i int) Unit {
u.combatAttrMods.Willpower += i
return u
}
func (u Unit) ModResistance(i int) Unit {
u.combatAttrMods.Resistance += i
return u
}
func (u Unit) Speed() int {
return u.Crunch().currentAttributes.Speed
}
func (u Unit) Defense() int {
return u.Crunch().currentAttributes.Defense
}
func (u Unit) Strength() int {
return u.Crunch().currentAttributes.Strength
}
func (u Unit) Accuracy() int {
return u.Crunch().currentAttributes.Accuracy
}
func (u Unit) Vitality() int {
return u.Crunch().currentAttributes.Vitality
}
func (u Unit) Willpower() int {
return u.Crunch().currentAttributes.Willpower
}
func (u Unit) Resistance() int {
return u.Crunch().currentAttributes.Resistance
}
type BaseStats struct {
MaxHp int
}
type Attributes struct {
Strength int
Defense int
Speed int
Accuracy int
Vitality int
Willpower int
Resistance int
}
type EffectType int
const (
PHYS EffectType = iota
MAG
SELF
)
type Attack struct {
Name string
FatigueCost int
PowerMod int
Accuracy int
Targets int
Stat string
EffType EffectType
Team int
}
type AttackMod struct {
PowerMod int
AccMod int
}
type AttackResult struct {
Damange int
Attr string
}
func Play() {
player := createPlayer()
for fight := 1; ; fight++ {
fmt.Println("starting fight", fight)
enemies := genEnemies(fight, 1)
combatants := enemies
combatants = append(combatants, player)
for round := 0; ; round++ {
fmt.Println("round", round)
combatants = playRound(combatants)
for _, c := range combatants {
if c.Name == player.Name {
player = c
break
}
}
if player.Hp <= 0 {
fmt.Println("you ded, try again")
os.Exit(0)
}
if checkOver(combatants) {
read("round over")
break
}
}
}
}
func playRound(combatants []Unit) []Unit {
orderedUnitNames := getPlayOrder(combatants)
for _, name := range orderedUnitNames {
if checkOver(combatants) {
return combatants
}
for _, c := range combatants {
if c.Name == name {
printUnit(c)
combatants = Turn(c, combatants)
break
}
}
}
return combatants
}
func checkOver(cbts []Unit) bool {
teams := make(map[int]bool)
for _, cbt := range cbts {
if cbt.Hp > 0 {
teams[cbt.Team] = true
}
}
if len(teams) < 2 {
return true
}
return false
}
type order struct {
Name string
Speed int
}
func getPlayOrder(units []Unit) []string {
var orders []order
for _, u := range units {
orders = append(orders, order{u.Name, u.Speed()})
}
return handleOrders(orders)
}
func handleOrders(orders []order) []string {
ordered := []string{}
for {
if allAccounted(orders, ordered) {
break
}
next := getNext(orders)
ordered = append(ordered, next)
for i, order := range orders {
if order.Name == next {
order.Speed -= 5
orders[i] = order
}
}
}
return ordered
}
func allAccounted(orders []order, list []string) bool {
for _, o := range orders {
if !contains(o.Name, list) {
return false
}
}
return true
}
func getNext(orders []order) string {
sort.Slice(orders, func(i, j int) bool {
return orders[i].Speed > orders[j].Speed
})
return orders[0].Name
}
func printUnit(unit Unit) {
fmt.Printf("Name: %v. Hp: %v/%v. Lvl: %v. Team: %v. Fat: %v. CurAttrs[Str: %v, Def: %v, Spd: %v, Acc: %v, Vit: %v]\r\n", unit.Name, unit.Hp, unit.BaseStats.MaxHp,
unit.AiLevel, unit.Team,
unit.Fatigue, unit.Strength(), unit.Defense(), unit.Speed(), unit.Accuracy(), unit.Vitality())
}
func printAttack(atk Attack) {
fmt.Println("")
fmt.Printf("Chosen Attack: Name: %v. Stat: %v. Pow: %v. Acc: %v. NumTargets: %v. FatCost: %v.\r\n", atk.Name, atk.Stat, atk.PowerMod, atk.Accuracy, atk.Targets, atk.FatigueCost)
fmt.Println("")
}
func createPlayer() Unit {
name := read("name")
strong := readAttr("good")
weak := readAttr("bad")
attrs := Attributes{
5, 5, 5, 5, 5, 5, 5,
}
for _, attr := range validAttrs {
if attr == strong {
switch attr {
case "strength":
attrs.Strength += 5
case "defense":
attrs.Defense += 5
case "speed":
attrs.Speed += 5
case "accuracy":
attrs.Accuracy += 5
case "vitality":
attrs.Vitality += 5
}
}
if attr == weak {
switch attr {
case "strength":
attrs.Strength -= 5
case "defense":
attrs.Defense -= 5
case "speed":
attrs.Speed -= 5
case "accuracy":
attrs.Accuracy -= 5
case "vitality":
attrs.Vitality -= 5
}
}
}
player := CreateUnit(name, attrs)
player.Attacks = getAttacks()
player.IsHuman = true
return player
}
func CreateUnit(name string, attrs Attributes) Unit {
unit := Unit{
Name: name,
BaseAttributes: attrs,
Attacks: []Attack{getAttacks()[0]},
}
unit = unit.Crunch()
unit.Hp = unit.BaseStats.MaxHp
return unit
}
func readAttr(msg string) string {
raw := read(msg)
if !contains(raw, validAttrs) {
fmt.Println("invalid attr")
return readAttr(msg)
}
return raw
}
func contains(target string, strs []string) bool {
for _, s := range strs {
if s == target {
return true
}
}
return false
}
var abs = 0
func genEnemies(i int, team int) []Unit {
units := []Unit{}
count := 1
if i > 5 {
count++
}
if i > 10 {
count++
}
if i > 15 {
count++
}
for x := 0; x < count; x++ {
units = append(units, genUnit(x, i, team))
}
return units
}
var atk = 0
func genUnit(x int, i int, team int) Unit {
potentialAtks := getAttacks()
attrs := Attributes{offset(i), offset(i), offset(i), offset(i), offset(i), offset(i), offset(i)}
unit := CreateUnit(fmt.Sprintf("goblin%v-%v-%v", i, x, abs), attrs)
unit.Team = team
unit.AiLevel = randomInt(0, 10)
if i%3 == 0 && atk < len(potentialAtks) {
unit.Attacks = append(unit.Attacks, potentialAtks[atk])
atk++
}
abs++
return unit
}
func offset(i int) int {
return i + (int(i/2) * randomInt(-1, 1))
}
func Turn(active Unit, units []Unit) []Unit {
if active.Hp <= 0 {
fmt.Println(active.Name, "is dead")
return units
}
atk := PickAttack(active)
printAttack(atk)
var targets []Unit
if atk.EffType == SELF {
targets = []Unit{active}
} else {
targets = PickTargets(atk, active.AiLevel, active.Team, active.IsHuman, units)
}
hitMap := make(map[string][]AttackResult)
for _, target := range targets {
hitMap[target.Name] = append(hitMap[target.Name], resolveAttack(atk, target))
}
for i, unit := range units {
for name, results := range hitMap {
if name == unit.Name {
for _, result := range results {
switch result.Attr {
case "strength":
unit = unit.ModStrength(result.Damange)
case "defense":
unit = unit.ModDefense(result.Damange)
case "speed":
unit = unit.ModSpeed(result.Damange)
case "accuracy":
unit = unit.ModAccuracy(result.Damange)
case "fatigue":
unit.Fatigue += result.Damange
case "hp":
unit.Hp -= result.Damange
if unit.Hp > unit.BaseStats.MaxHp {
unit.Hp = unit.BaseStats.MaxHp
}
case "select":
var attr string
if active.IsHuman {
attr = readAttr("pick stat")
} else {
attr = validAttrs[randomInt(0, len(validAttrs)-1)]
}
result.Attr = attr
switch attr {
case "strength":
unit = unit.ModStrength(result.Damange)
case "defense":
unit = unit.ModDefense(result.Damange)
case "speed":
unit = unit.ModSpeed(result.Damange)
case "accuracy":
unit = unit.ModAccuracy(result.Damange)
case "vitality":
unit = unit.ModVitality(result.Damange)
}
}
if result.Attr == "miss" {
fmt.Printf("%v attack %v missed %v\r\n", active.Name, atk.Name, unit.Name)
} else {
fmt.Printf("%v attack %v hit %v and dealt %v to %v\r\n", active.Name, atk.Name, unit.Name, result.Damange, result.Attr)
if unit.Hp <= 0 {
fmt.Println(active.Name, "killed", unit.Name)
}
}
}
}
}
units[i] = unit
}
activeIndex := -1
for i, unit := range units {
if unit.Name == active.Name {
active = unit
activeIndex = i
break
}
}
active.Fatigue += atk.FatigueCost
active = active.Crunch()
units[activeIndex] = active
return units
}
func resolveAttack(attack Attack, unit Unit) AttackResult {
//TODO: support magic attacks
if attack.Team == unit.Team {
return AttackResult{
Attr: attack.Stat,
Damange: attack.PowerMod,
}
}
if attack.EffType == PHYS {
if attack.Accuracy < unit.Speed() {
return AttackResult{
Attr: "miss",
}
}
dmg := attack.PowerMod - unit.Defense()
if dmg < 0 {
dmg = 0
}
return AttackResult{
Attr: attack.Stat,
Damange: dmg,
}
}
if attack.Accuracy < unit.Willpower() {
return AttackResult{
Attr: "miss",
}
}
dmg := attack.PowerMod - unit.Resistance()
if dmg < 0 {
dmg = 0
}
return AttackResult{
Attr: attack.Stat,
Damange: dmg,
}
}
func PickTargets(atk Attack, lvl int, team int, human bool, units []Unit) []Unit {
if human {
fmt.Println("")
for _, unit := range units {
printUnit(unit)
}
fmt.Println("")
return pickPlayerTargets(atk.Targets, units)
}
return npcPickTargets(atk, lvl, team, units)
}
func PickAttack(unit Unit) Attack {
var atk Attack
if unit.IsHuman {
atk = pickPlayerAttack(unit.Attacks)
} else {
atk = npcPickAttack(unit)
}
atk.Team = unit.Team
//TODO: phys vs mag
if atk.EffType == PHYS {
atk.PowerMod += unit.Strength()
atk.Accuracy += unit.Accuracy()
} else {
atk.PowerMod += unit.Willpower()
atk.Accuracy += int((unit.Willpower() + unit.Resistance()) / 2)
}
return atk
}
func pickPlayerTargets(num int, units []Unit) []Unit {
var targets []Unit
for i := 0; i < num; i++ {
fmt.Println("Target", i)
targets = append(targets, selectPlayerTarget(units))
}
return targets
}
func npcPickAttack(unit Unit) Attack {
if unit.AiLevel < 3 {
return unit.Attacks[randomInt(0, len(unit.Attacks)-1)]
}
if unit.Fatigue > 4 {
for _, atk := range unit.Attacks {
if atk.Name == "rest" {
return atk
}
}
}
var basic Attack
for _, atk := range unit.Attacks {
if atk.Name == "big" && unit.Strength() < unit.Accuracy() {
return atk
}
if atk.Name == "small" && unit.Accuracy() < unit.Strength() {
return atk
}
if atk.Name == "basic" {
basic = atk
}
}
return basic
}
func npcPickTargets(atk Attack, lvl int, team int, units []Unit) []Unit {
targets := []Unit{}
for i := 0; i < atk.Targets; i++ {
if lvl < 2 {
randUnit := randomInt(0, len(units)-1)
targets = append(targets, units[randUnit])
} else {
targets = append(targets, findFirstValidTarget(atk.PowerMod, team, units))
}
}
return targets
}
func findFirstValidTarget(power int, team int, units []Unit) Unit {
for _, unit := range units {
if power > 0 {
if unit.Team != team {
return unit
}
} else if unit.Team == team {
return unit
}
}
if len(units) < 1 {
return Unit{}
}
return units[0]
}
func selectPlayerTarget(units []Unit) Unit {
chosen := read("pick target")
target := Unit{}
for _, unit := range units {
if unit.Name == chosen {
target = unit
}
}
if target.Name == "" {
fmt.Println("invalid target")
return selectPlayerTarget(units)
}
return target
}
func pickPlayerAttack(atks []Attack) Attack {
atkStr := read("choose attack")
var atk Attack
for _, ak := range atks {
if ak.Name == atkStr {
atk = ak | if atk.Name == "" {
fmt.Println("Ivalid attack")
return pickPlayerAttack(atks)
}
return atk
}
func randomInt(min, max int) int {
max++
return min + rand.Intn(max-min)
}
func read(msg string) string {
reader := bufio.NewReader(os.Stdin)
fmt.Println(msg)
text, _ := reader.ReadString('\n')
text = strings.Replace(text, "\n", "", -1)
text = strings.Replace(text, "\r", "", -1)
return text
}
func readAsInt(msg string) int {
raw := read(msg)
i, err := strconv.Atoi(raw)
if err != nil {
readAsInt("invalid number, try again")
}
return i
} | }
} | random_line_split |
execution.rs | // Copyright 2018-2021 Parity Technologies (UK) Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::reflect::{
ContractEnv,
DispatchError,
};
use core::{
any::TypeId,
convert::Infallible,
mem::ManuallyDrop,
};
use ink_env::{
Environment,
ReturnFlags,
};
use ink_primitives::{
Key,
KeyPtr,
};
use ink_storage::{
alloc,
alloc::ContractPhase,
traits::{
pull_spread_root,
push_spread_root,
SpreadAllocate,
SpreadLayout,
},
};
/// The root key of the ink! smart contract.
///
/// # Note
///
/// - This is the key where storage allocation, pushing and pulling is rooted
/// using the `SpreadLayout` and `SpreadAllocate` traits primarily.
/// - This trait is automatically implemented by the ink! codegen.
/// - The existence of this trait allows to customize the root key in future
/// versions of ink! if needed.
pub trait ContractRootKey {
const ROOT_KEY: Key;
}
/// Returns `Ok` if the caller did not transfer additional value to the callee.
///
/// # Errors
///
/// If the caller did send some amount of transferred value to the callee.
#[inline]
pub fn deny_payment<E>() -> Result<(), DispatchError>
where
E: Environment,
{
let transferred = ink_env::transferred_balance::<E>();
if transferred != <E as Environment>::Balance::from(0_u32) {
return Err(DispatchError::PaidUnpayableMessage)
}
Ok(())
}
/// Configuration for execution of ink! constructor.
#[derive(Debug, Copy, Clone)]
pub struct ExecuteConstructorConfig {
/// Yields `true` if the dynamic storage allocator has been enabled.
///
/// # Note
///
/// Authors can enable it via `#[ink::contract(dynamic_storage_allocator = true)]`.
pub dynamic_storage_alloc: bool,
}
/// Executes the given ink! constructor.
///
/// # Note
///
/// The closure is supposed to already contain all the arguments that the real
/// constructor message requires and forwards them.
#[inline]
pub fn execute_constructor<Contract, F, R>(
config: ExecuteConstructorConfig,
f: F,
) -> Result<(), DispatchError>
where
Contract: SpreadLayout + ContractRootKey,
F: FnOnce() -> R,
<private::Seal<R> as ConstructorReturnType<Contract>>::ReturnValue: scale::Encode,
private::Seal<R>: ConstructorReturnType<Contract>,
{
if config.dynamic_storage_alloc {
alloc::initialize(ContractPhase::Deploy);
}
let result = ManuallyDrop::new(private::Seal(f()));
match result.as_result() {
Ok(contract) => {
// Constructor is infallible or is fallible but succeeded.
//
// This requires us to sync back the changes of the contract storage.
let root_key = <Contract as ContractRootKey>::ROOT_KEY;
push_spread_root::<Contract>(contract, &root_key);
if config.dynamic_storage_alloc {
alloc::finalize();
}
Ok(())
}
Err(_) => {
// Constructor is fallible and failed.
//
// We need to revert the state of the transaction.
ink_env::return_value::<
<private::Seal<R> as ConstructorReturnType<Contract>>::ReturnValue,
>(
ReturnFlags::default().set_reverted(true),
result.return_value(),
)
}
}
}
/// Initializes the ink! contract using the given initialization routine.
///
/// # Note
///
/// - This uses `SpreadAllocate` trait in order to default initialize the
/// ink! smart contract before calling the initialization routine.
/// - This either returns `Contract` or `Result<Contract, E>` depending
/// on the return type `R` of the initializer closure `F`.
/// If `R` is `()` then `Contract` is returned and if `R` is any type of
/// `Result<(), E>` then `Result<Contract, E>` is returned.
/// Other return types for `F` than the ones listed above are not allowed.
#[inline]
pub fn initialize_contract<Contract, F, R>(
initializer: F,
) -> <R as InitializerReturnType<Contract>>::Wrapped
where
Contract: ContractRootKey + SpreadAllocate,
F: FnOnce(&mut Contract) -> R,
R: InitializerReturnType<Contract>,
{
let mut key_ptr = KeyPtr::from(<Contract as ContractRootKey>::ROOT_KEY);
let mut instance = <Contract as SpreadAllocate>::allocate_spread(&mut key_ptr);
let result = initializer(&mut instance);
result.into_wrapped(instance)
}
mod private {
/// Seals the implementation of `ContractInitializerReturnType`.
pub trait Sealed {}
impl Sealed for () {}
impl<T, E> Sealed for Result<T, E> {}
/// A thin-wrapper type that automatically seals its inner type.
///
/// Since it is private it can only be used from within this crate.
/// We need this type in order to properly seal the `ConstructorReturnType`
/// trait from unwanted external trait implementations.
#[repr(transparent)]
pub struct Seal<T>(pub T);
impl<T> Sealed for Seal<T> {}
}
/// Guards against using invalid contract initializer types.
///
/// # Note
///
/// Currently the only allowed types are `()` and `Result<(), E>`
/// where `E` is some unspecified error type.
/// If the contract initializer returns `Result::Err` the utility
/// method that is used to initialize an ink! smart contract will
/// revert the state of the contract instantiation.
pub trait ConstructorReturnType<C>: private::Sealed {
/// Is `true` if `Self` is `Result<C, E>`.
const IS_RESULT: bool = false;
/// The error type of the constructor return type.
///
/// # Note
///
/// For infallible constructors this is `core::convert::Infallible`.
type Error;
/// The type of the return value of the constructor.
///
/// # Note
///
/// For infallible constructors this is `()` whereas for fallible
/// constructors this is the actual return value. Since we only ever
/// return a value in case of `Result::Err` the `Result::Ok` value
/// does not matter.
type ReturnValue;
/// Converts the return value into a `Result` instance.
///
/// # Note
///
/// For infallible constructor returns this always yields `Ok`.
fn as_result(&self) -> Result<&C, &Self::Error>;
/// Returns the actual return value of the constructor.
///
/// # Note
///
/// For infallible constructor returns this always yields `()`
/// and is basically ignored since this does not get called
/// if the constructor did not fail.
fn return_value(&self) -> &Self::ReturnValue;
}
impl<C> ConstructorReturnType<C> for private::Seal<C> {
type Error = Infallible;
type ReturnValue = ();
#[inline]
fn as_result(&self) -> Result<&C, &Self::Error> {
Ok(&self.0)
}
#[inline]
fn return_value(&self) -> &Self::ReturnValue {
&()
}
}
impl<C, E> ConstructorReturnType<C> for private::Seal<Result<C, E>> {
const IS_RESULT: bool = true;
type Error = E;
type ReturnValue = Result<C, E>;
#[inline]
fn as_result(&self) -> Result<&C, &Self::Error> {
self.0.as_ref()
}
#[inline]
fn return_value(&self) -> &Self::ReturnValue |
}
/// Trait used to convert return types of contract initializer routines.
///
/// Only `()` and `Result<(), E>` are allowed contract initializer return types.
/// For `WrapReturnType<C>` where `C` is the contract type the trait converts
/// `()` into `C` and `Result<(), E>` into `Result<C, E>`.
pub trait InitializerReturnType<C>: private::Sealed {
type Wrapped;
/// Performs the type conversion of the initialization routine return type.
fn into_wrapped(self, wrapped: C) -> Self::Wrapped;
}
impl<C> InitializerReturnType<C> for () {
type Wrapped = C;
#[inline]
fn into_wrapped(self, wrapped: C) -> C {
wrapped
}
}
impl<C, E> InitializerReturnType<C> for Result<(), E> {
type Wrapped = Result<C, E>;
#[inline]
fn into_wrapped(self, wrapped: C) -> Self::Wrapped {
self.map(|_| wrapped)
}
}
/// Configuration for execution of ink! messages.
#[derive(Debug, Copy, Clone)]
pub struct ExecuteMessageConfig {
/// Yields `true` if the ink! message accepts payment.
///
/// # Note
///
/// If no ink! message within the same ink! smart contract
/// is payable then this flag will be `true` since the check
/// then is moved before the message dispatch as an optimization.
pub payable: bool,
/// Yields `true` if the ink! message might mutate contract storage.
///
/// # Note
///
/// This is usually true for `&mut self` ink! messages.
pub mutates: bool,
/// Yields `true` if the dynamic storage allocator has been enabled.
///
/// # Note
///
/// Authors can enable it via `#[ink::contract(dynamic_storage_allocator = true)]`.
pub dynamic_storage_alloc: bool,
}
/// Initiates an ink! message call with the given configuration.
///
/// Returns the contract state pulled from the root storage region upon success.
///
/// # Note
///
/// This work around that splits executing an ink! message into initiate
/// and finalize phases was needed due to the fact that `is_result_type`
/// and `is_result_err` macros do not work in generic contexts.
#[inline]
pub fn initiate_message<Contract>(
config: ExecuteMessageConfig,
) -> Result<Contract, DispatchError>
where
Contract: SpreadLayout + ContractEnv,
{
if !config.payable {
deny_payment::<<Contract as ContractEnv>::Env>()?;
}
if config.dynamic_storage_alloc {
alloc::initialize(ContractPhase::Call);
}
let root_key = Key::from([0x00; 32]);
let contract = pull_spread_root::<Contract>(&root_key);
Ok(contract)
}
/// Finalizes an ink! message call with the given configuration.
///
/// This dispatches into fallible and infallible message finalization
/// depending on the given `success` state.
///
/// - If the message call was successful the return value is simply returned
/// and cached storage is pushed back to the contract storage.
/// - If the message call failed the return value result is returned instead
/// and the transaction is signalled to be reverted.
///
/// # Note
///
/// This work around that splits executing an ink! message into initiate
/// and finalize phases was needed due to the fact that `is_result_type`
/// and `is_result_err` macros do not work in generic contexts.
#[inline]
pub fn finalize_message<Contract, R>(
success: bool,
contract: &Contract,
config: ExecuteMessageConfig,
result: &R,
) -> Result<(), DispatchError>
where
Contract: SpreadLayout,
R: scale::Encode + 'static,
{
if success {
finalize_infallible_message(contract, config, result)
} else {
finalize_fallible_message(result)
}
}
#[inline]
fn finalize_infallible_message<Contract, R>(
contract: &Contract,
config: ExecuteMessageConfig,
result: &R,
) -> Result<(), DispatchError>
where
Contract: SpreadLayout,
R: scale::Encode + 'static,
{
if config.mutates {
let root_key = Key::from([0x00; 32]);
push_spread_root::<Contract>(contract, &root_key);
}
if config.dynamic_storage_alloc {
alloc::finalize();
}
if TypeId::of::<R>() != TypeId::of::<()>() {
// In case the return type is `()` we do not return a value.
ink_env::return_value::<R>(ReturnFlags::default(), result)
}
Ok(())
}
#[inline]
fn finalize_fallible_message<R>(result: &R) -> !
where
R: scale::Encode + 'static,
{
// There is no need to push back the intermediate results of the
// contract since the transaction is going to be reverted.
ink_env::return_value::<R>(ReturnFlags::default().set_reverted(true), result)
}
| {
&self.0
} | identifier_body |
execution.rs | // Copyright 2018-2021 Parity Technologies (UK) Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::reflect::{
ContractEnv,
DispatchError,
};
use core::{
any::TypeId,
convert::Infallible,
mem::ManuallyDrop,
};
use ink_env::{
Environment,
ReturnFlags,
};
use ink_primitives::{
Key,
KeyPtr,
};
use ink_storage::{
alloc,
alloc::ContractPhase,
traits::{
pull_spread_root,
push_spread_root,
SpreadAllocate,
SpreadLayout,
},
};
/// The root key of the ink! smart contract.
///
/// # Note
///
/// - This is the key where storage allocation, pushing and pulling is rooted
/// using the `SpreadLayout` and `SpreadAllocate` traits primarily.
/// - This trait is automatically implemented by the ink! codegen.
/// - The existence of this trait allows to customize the root key in future
/// versions of ink! if needed.
pub trait ContractRootKey {
const ROOT_KEY: Key;
}
/// Returns `Ok` if the caller did not transfer additional value to the callee.
///
/// # Errors
///
/// If the caller did send some amount of transferred value to the callee.
#[inline]
pub fn deny_payment<E>() -> Result<(), DispatchError>
where
E: Environment,
{
let transferred = ink_env::transferred_balance::<E>();
if transferred != <E as Environment>::Balance::from(0_u32) {
return Err(DispatchError::PaidUnpayableMessage)
}
Ok(())
}
/// Configuration for execution of ink! constructor.
#[derive(Debug, Copy, Clone)]
pub struct ExecuteConstructorConfig {
/// Yields `true` if the dynamic storage allocator has been enabled.
///
/// # Note
///
/// Authors can enable it via `#[ink::contract(dynamic_storage_allocator = true)]`.
pub dynamic_storage_alloc: bool,
}
/// Executes the given ink! constructor.
///
/// # Note
///
/// The closure is supposed to already contain all the arguments that the real
/// constructor message requires and forwards them.
#[inline]
pub fn execute_constructor<Contract, F, R>(
config: ExecuteConstructorConfig,
f: F,
) -> Result<(), DispatchError>
where
Contract: SpreadLayout + ContractRootKey,
F: FnOnce() -> R,
<private::Seal<R> as ConstructorReturnType<Contract>>::ReturnValue: scale::Encode,
private::Seal<R>: ConstructorReturnType<Contract>,
{
if config.dynamic_storage_alloc {
alloc::initialize(ContractPhase::Deploy);
}
let result = ManuallyDrop::new(private::Seal(f()));
match result.as_result() {
Ok(contract) => {
// Constructor is infallible or is fallible but succeeded.
//
// This requires us to sync back the changes of the contract storage.
let root_key = <Contract as ContractRootKey>::ROOT_KEY;
push_spread_root::<Contract>(contract, &root_key);
if config.dynamic_storage_alloc {
alloc::finalize();
}
Ok(())
}
Err(_) => {
// Constructor is fallible and failed.
//
// We need to revert the state of the transaction.
ink_env::return_value::<
<private::Seal<R> as ConstructorReturnType<Contract>>::ReturnValue,
>(
ReturnFlags::default().set_reverted(true),
result.return_value(),
)
}
}
}
/// Initializes the ink! contract using the given initialization routine.
///
/// # Note
///
/// - This uses `SpreadAllocate` trait in order to default initialize the
/// ink! smart contract before calling the initialization routine.
/// - This either returns `Contract` or `Result<Contract, E>` depending
/// on the return type `R` of the initializer closure `F`.
/// If `R` is `()` then `Contract` is returned and if `R` is any type of
/// `Result<(), E>` then `Result<Contract, E>` is returned.
/// Other return types for `F` than the ones listed above are not allowed.
#[inline]
pub fn initialize_contract<Contract, F, R>(
initializer: F,
) -> <R as InitializerReturnType<Contract>>::Wrapped
where
Contract: ContractRootKey + SpreadAllocate,
F: FnOnce(&mut Contract) -> R,
R: InitializerReturnType<Contract>,
{
let mut key_ptr = KeyPtr::from(<Contract as ContractRootKey>::ROOT_KEY);
let mut instance = <Contract as SpreadAllocate>::allocate_spread(&mut key_ptr);
let result = initializer(&mut instance);
result.into_wrapped(instance)
}
mod private {
/// Seals the implementation of `ContractInitializerReturnType`.
pub trait Sealed {}
impl Sealed for () {}
impl<T, E> Sealed for Result<T, E> {}
/// A thin-wrapper type that automatically seals its inner type.
///
/// Since it is private it can only be used from within this crate.
/// We need this type in order to properly seal the `ConstructorReturnType`
/// trait from unwanted external trait implementations.
#[repr(transparent)]
pub struct Seal<T>(pub T);
impl<T> Sealed for Seal<T> {}
}
/// Guards against using invalid contract initializer types.
///
/// # Note
///
/// Currently the only allowed types are `()` and `Result<(), E>`
/// where `E` is some unspecified error type.
/// If the contract initializer returns `Result::Err` the utility
/// method that is used to initialize an ink! smart contract will
/// revert the state of the contract instantiation.
pub trait ConstructorReturnType<C>: private::Sealed {
/// Is `true` if `Self` is `Result<C, E>`.
const IS_RESULT: bool = false;
/// The error type of the constructor return type.
///
/// # Note
///
/// For infallible constructors this is `core::convert::Infallible`.
type Error;
/// The type of the return value of the constructor.
///
/// # Note
///
/// For infallible constructors this is `()` whereas for fallible
/// constructors this is the actual return value. Since we only ever
/// return a value in case of `Result::Err` the `Result::Ok` value
/// does not matter.
type ReturnValue;
/// Converts the return value into a `Result` instance.
///
/// # Note
///
/// For infallible constructor returns this always yields `Ok`.
fn as_result(&self) -> Result<&C, &Self::Error>;
/// Returns the actual return value of the constructor.
///
/// # Note
///
/// For infallible constructor returns this always yields `()`
/// and is basically ignored since this does not get called
/// if the constructor did not fail.
fn return_value(&self) -> &Self::ReturnValue;
}
impl<C> ConstructorReturnType<C> for private::Seal<C> {
type Error = Infallible;
type ReturnValue = ();
#[inline]
fn as_result(&self) -> Result<&C, &Self::Error> {
Ok(&self.0)
}
#[inline]
fn return_value(&self) -> &Self::ReturnValue {
&()
}
}
impl<C, E> ConstructorReturnType<C> for private::Seal<Result<C, E>> {
const IS_RESULT: bool = true;
type Error = E;
type ReturnValue = Result<C, E>;
#[inline]
fn as_result(&self) -> Result<&C, &Self::Error> {
self.0.as_ref()
}
#[inline]
fn return_value(&self) -> &Self::ReturnValue {
&self.0
}
}
/// Trait used to convert return types of contract initializer routines.
///
/// Only `()` and `Result<(), E>` are allowed contract initializer return types.
/// For `WrapReturnType<C>` where `C` is the contract type the trait converts
/// `()` into `C` and `Result<(), E>` into `Result<C, E>`.
pub trait InitializerReturnType<C>: private::Sealed {
type Wrapped;
/// Performs the type conversion of the initialization routine return type.
fn into_wrapped(self, wrapped: C) -> Self::Wrapped;
}
impl<C> InitializerReturnType<C> for () {
type Wrapped = C;
#[inline]
fn into_wrapped(self, wrapped: C) -> C {
wrapped
}
}
impl<C, E> InitializerReturnType<C> for Result<(), E> {
type Wrapped = Result<C, E>;
#[inline]
fn into_wrapped(self, wrapped: C) -> Self::Wrapped {
self.map(|_| wrapped)
}
}
/// Configuration for execution of ink! messages.
#[derive(Debug, Copy, Clone)]
pub struct ExecuteMessageConfig {
/// Yields `true` if the ink! message accepts payment.
///
/// # Note
///
/// If no ink! message within the same ink! smart contract
/// is payable then this flag will be `true` since the check
/// then is moved before the message dispatch as an optimization.
pub payable: bool,
/// Yields `true` if the ink! message might mutate contract storage.
///
/// # Note
///
/// This is usually true for `&mut self` ink! messages.
pub mutates: bool,
/// Yields `true` if the dynamic storage allocator has been enabled.
///
/// # Note
///
/// Authors can enable it via `#[ink::contract(dynamic_storage_allocator = true)]`.
pub dynamic_storage_alloc: bool,
}
/// Initiates an ink! message call with the given configuration.
///
/// Returns the contract state pulled from the root storage region upon success.
///
/// # Note
///
/// This work around that splits executing an ink! message into initiate
/// and finalize phases was needed due to the fact that `is_result_type`
/// and `is_result_err` macros do not work in generic contexts.
#[inline]
pub fn initiate_message<Contract>(
config: ExecuteMessageConfig,
) -> Result<Contract, DispatchError>
where
Contract: SpreadLayout + ContractEnv,
{
if !config.payable {
deny_payment::<<Contract as ContractEnv>::Env>()?;
}
if config.dynamic_storage_alloc {
alloc::initialize(ContractPhase::Call);
}
let root_key = Key::from([0x00; 32]);
let contract = pull_spread_root::<Contract>(&root_key);
Ok(contract)
}
/// Finalizes an ink! message call with the given configuration.
///
/// This dispatches into fallible and infallible message finalization
/// depending on the given `success` state.
///
/// - If the message call was successful the return value is simply returned
/// and cached storage is pushed back to the contract storage.
/// - If the message call failed the return value result is returned instead
/// and the transaction is signalled to be reverted.
///
/// # Note
///
/// This work around that splits executing an ink! message into initiate
/// and finalize phases was needed due to the fact that `is_result_type`
/// and `is_result_err` macros do not work in generic contexts.
#[inline]
pub fn finalize_message<Contract, R>(
success: bool,
contract: &Contract,
config: ExecuteMessageConfig,
result: &R,
) -> Result<(), DispatchError>
where
Contract: SpreadLayout,
R: scale::Encode + 'static,
{
if success {
finalize_infallible_message(contract, config, result)
} else {
finalize_fallible_message(result)
}
}
#[inline]
fn finalize_infallible_message<Contract, R>(
contract: &Contract,
config: ExecuteMessageConfig,
result: &R,
) -> Result<(), DispatchError>
where
Contract: SpreadLayout,
R: scale::Encode + 'static,
{
if config.mutates {
let root_key = Key::from([0x00; 32]);
push_spread_root::<Contract>(contract, &root_key);
}
if config.dynamic_storage_alloc |
if TypeId::of::<R>() != TypeId::of::<()>() {
// In case the return type is `()` we do not return a value.
ink_env::return_value::<R>(ReturnFlags::default(), result)
}
Ok(())
}
#[inline]
fn finalize_fallible_message<R>(result: &R) -> !
where
R: scale::Encode + 'static,
{
// There is no need to push back the intermediate results of the
// contract since the transaction is going to be reverted.
ink_env::return_value::<R>(ReturnFlags::default().set_reverted(true), result)
}
| {
alloc::finalize();
} | conditional_block |
execution.rs | // Copyright 2018-2021 Parity Technologies (UK) Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::reflect::{
ContractEnv,
DispatchError,
};
use core::{
any::TypeId,
convert::Infallible,
mem::ManuallyDrop,
};
use ink_env::{
Environment,
ReturnFlags,
};
use ink_primitives::{
Key,
KeyPtr,
};
use ink_storage::{
alloc,
alloc::ContractPhase,
traits::{
pull_spread_root,
push_spread_root,
SpreadAllocate,
SpreadLayout,
},
};
/// The root key of the ink! smart contract.
///
/// # Note
///
/// - This is the key where storage allocation, pushing and pulling is rooted
/// using the `SpreadLayout` and `SpreadAllocate` traits primarily.
/// - This trait is automatically implemented by the ink! codegen.
/// - The existence of this trait allows to customize the root key in future
/// versions of ink! if needed.
pub trait ContractRootKey {
const ROOT_KEY: Key;
}
/// Returns `Ok` if the caller did not transfer additional value to the callee.
///
/// # Errors
///
/// If the caller did send some amount of transferred value to the callee.
#[inline]
pub fn deny_payment<E>() -> Result<(), DispatchError>
where
E: Environment,
{
let transferred = ink_env::transferred_balance::<E>();
if transferred != <E as Environment>::Balance::from(0_u32) {
return Err(DispatchError::PaidUnpayableMessage)
}
Ok(())
}
/// Configuration for execution of ink! constructor.
#[derive(Debug, Copy, Clone)]
pub struct ExecuteConstructorConfig {
/// Yields `true` if the dynamic storage allocator has been enabled.
///
/// # Note
///
/// Authors can enable it via `#[ink::contract(dynamic_storage_allocator = true)]`.
pub dynamic_storage_alloc: bool,
}
/// Executes the given ink! constructor.
///
/// # Note
///
/// The closure is supposed to already contain all the arguments that the real
/// constructor message requires and forwards them.
#[inline]
pub fn execute_constructor<Contract, F, R>(
config: ExecuteConstructorConfig,
f: F,
) -> Result<(), DispatchError>
where
Contract: SpreadLayout + ContractRootKey,
F: FnOnce() -> R,
<private::Seal<R> as ConstructorReturnType<Contract>>::ReturnValue: scale::Encode,
private::Seal<R>: ConstructorReturnType<Contract>,
{
if config.dynamic_storage_alloc {
alloc::initialize(ContractPhase::Deploy);
}
let result = ManuallyDrop::new(private::Seal(f()));
match result.as_result() {
Ok(contract) => {
// Constructor is infallible or is fallible but succeeded.
//
// This requires us to sync back the changes of the contract storage.
let root_key = <Contract as ContractRootKey>::ROOT_KEY;
push_spread_root::<Contract>(contract, &root_key);
if config.dynamic_storage_alloc {
alloc::finalize();
}
Ok(())
}
Err(_) => {
// Constructor is fallible and failed.
//
// We need to revert the state of the transaction.
ink_env::return_value::<
<private::Seal<R> as ConstructorReturnType<Contract>>::ReturnValue,
>(
ReturnFlags::default().set_reverted(true),
result.return_value(),
)
}
}
}
/// Initializes the ink! contract using the given initialization routine.
///
/// # Note
///
/// - This uses `SpreadAllocate` trait in order to default initialize the
/// ink! smart contract before calling the initialization routine.
/// - This either returns `Contract` or `Result<Contract, E>` depending
/// on the return type `R` of the initializer closure `F`.
/// If `R` is `()` then `Contract` is returned and if `R` is any type of
/// `Result<(), E>` then `Result<Contract, E>` is returned.
/// Other return types for `F` than the ones listed above are not allowed.
#[inline]
pub fn | <Contract, F, R>(
initializer: F,
) -> <R as InitializerReturnType<Contract>>::Wrapped
where
Contract: ContractRootKey + SpreadAllocate,
F: FnOnce(&mut Contract) -> R,
R: InitializerReturnType<Contract>,
{
let mut key_ptr = KeyPtr::from(<Contract as ContractRootKey>::ROOT_KEY);
let mut instance = <Contract as SpreadAllocate>::allocate_spread(&mut key_ptr);
let result = initializer(&mut instance);
result.into_wrapped(instance)
}
mod private {
/// Seals the implementation of `ContractInitializerReturnType`.
pub trait Sealed {}
impl Sealed for () {}
impl<T, E> Sealed for Result<T, E> {}
/// A thin-wrapper type that automatically seals its inner type.
///
/// Since it is private it can only be used from within this crate.
/// We need this type in order to properly seal the `ConstructorReturnType`
/// trait from unwanted external trait implementations.
#[repr(transparent)]
pub struct Seal<T>(pub T);
impl<T> Sealed for Seal<T> {}
}
/// Guards against using invalid contract initializer types.
///
/// # Note
///
/// Currently the only allowed types are `()` and `Result<(), E>`
/// where `E` is some unspecified error type.
/// If the contract initializer returns `Result::Err` the utility
/// method that is used to initialize an ink! smart contract will
/// revert the state of the contract instantiation.
pub trait ConstructorReturnType<C>: private::Sealed {
/// Is `true` if `Self` is `Result<C, E>`.
const IS_RESULT: bool = false;
/// The error type of the constructor return type.
///
/// # Note
///
/// For infallible constructors this is `core::convert::Infallible`.
type Error;
/// The type of the return value of the constructor.
///
/// # Note
///
/// For infallible constructors this is `()` whereas for fallible
/// constructors this is the actual return value. Since we only ever
/// return a value in case of `Result::Err` the `Result::Ok` value
/// does not matter.
type ReturnValue;
/// Converts the return value into a `Result` instance.
///
/// # Note
///
/// For infallible constructor returns this always yields `Ok`.
fn as_result(&self) -> Result<&C, &Self::Error>;
/// Returns the actual return value of the constructor.
///
/// # Note
///
/// For infallible constructor returns this always yields `()`
/// and is basically ignored since this does not get called
/// if the constructor did not fail.
fn return_value(&self) -> &Self::ReturnValue;
}
impl<C> ConstructorReturnType<C> for private::Seal<C> {
type Error = Infallible;
type ReturnValue = ();
#[inline]
fn as_result(&self) -> Result<&C, &Self::Error> {
Ok(&self.0)
}
#[inline]
fn return_value(&self) -> &Self::ReturnValue {
&()
}
}
impl<C, E> ConstructorReturnType<C> for private::Seal<Result<C, E>> {
const IS_RESULT: bool = true;
type Error = E;
type ReturnValue = Result<C, E>;
#[inline]
fn as_result(&self) -> Result<&C, &Self::Error> {
self.0.as_ref()
}
#[inline]
fn return_value(&self) -> &Self::ReturnValue {
&self.0
}
}
/// Trait used to convert return types of contract initializer routines.
///
/// Only `()` and `Result<(), E>` are allowed contract initializer return types.
/// For `WrapReturnType<C>` where `C` is the contract type the trait converts
/// `()` into `C` and `Result<(), E>` into `Result<C, E>`.
pub trait InitializerReturnType<C>: private::Sealed {
type Wrapped;
/// Performs the type conversion of the initialization routine return type.
fn into_wrapped(self, wrapped: C) -> Self::Wrapped;
}
impl<C> InitializerReturnType<C> for () {
type Wrapped = C;
#[inline]
fn into_wrapped(self, wrapped: C) -> C {
wrapped
}
}
impl<C, E> InitializerReturnType<C> for Result<(), E> {
    type Wrapped = Result<C, E>;

    /// Substitutes the contract for a successful `()` outcome while
    /// propagating any initializer error unchanged.
    #[inline]
    fn into_wrapped(self, wrapped: C) -> Self::Wrapped {
        // Behaves exactly like `self.map(|_| wrapped)`.
        match self {
            Ok(()) => Ok(wrapped),
            Err(error) => Err(error),
        }
    }
}
/// Configuration for execution of ink! messages.
///
/// Field order is significant for the derived `Debug` output and must
/// not be changed.
#[derive(Debug, Copy, Clone)]
pub struct ExecuteMessageConfig {
/// Yields `true` if the ink! message accepts payment.
///
/// # Note
///
/// If no ink! message within the same ink! smart contract
/// is payable then this flag will be `true` since the check
/// then is moved before the message dispatch as an optimization.
pub payable: bool,
/// Yields `true` if the ink! message might mutate contract storage.
///
/// # Note
///
/// This is usually true for `&mut self` ink! messages. When `false`,
/// the finalize phase skips pushing storage back.
pub mutates: bool,
/// Yields `true` if the dynamic storage allocator has been enabled.
///
/// # Note
///
/// Authors can enable it via `#[ink::contract(dynamic_storage_allocator = true)]`.
pub dynamic_storage_alloc: bool,
}
/// Initiates an ink! message call with the given configuration.
///
/// Returns the contract state pulled from the root storage region upon success.
///
/// # Note
///
/// Splitting message execution into initiate and finalize phases works
/// around the fact that the `is_result_type` and `is_result_err` macros
/// do not work in generic contexts.
#[inline]
pub fn initiate_message<Contract>(
    config: ExecuteMessageConfig,
) -> Result<Contract, DispatchError>
where
    Contract: SpreadLayout + ContractEnv,
{
    // Reject incoming value transfers unless the message is payable.
    if !config.payable {
        deny_payment::<<Contract as ContractEnv>::Env>()?;
    }
    // Spin up the dynamic storage allocator for the call phase if enabled.
    if config.dynamic_storage_alloc {
        alloc::initialize(ContractPhase::Call);
    }
    // The contract state lives under the all-zeroes root key.
    Ok(pull_spread_root::<Contract>(&Key::from([0x00; 32])))
}
/// Finalizes an ink! message call with the given configuration.
///
/// Dispatches into fallible or infallible message finalization depending
/// on the given `success` state:
///
/// - On success the return value is emitted and cached storage is pushed
///   back to the contract storage.
/// - On failure the return value result is emitted instead and the
///   transaction is signalled to be reverted.
///
/// # Note
///
/// Splitting message execution into initiate and finalize phases works
/// around the fact that the `is_result_type` and `is_result_err` macros
/// do not work in generic contexts.
#[inline]
pub fn finalize_message<Contract, R>(
    success: bool,
    contract: &Contract,
    config: ExecuteMessageConfig,
    result: &R,
) -> Result<(), DispatchError>
where
    Contract: SpreadLayout,
    R: scale::Encode + 'static,
{
    match success {
        true => finalize_infallible_message(contract, config, result),
        false => finalize_fallible_message(result),
    }
}
/// Finalizes a successful ink! message call: pushes mutated state back to
/// storage, shuts down the dynamic storage allocator if enabled, and emits
/// the encoded return value to the host environment.
#[inline]
fn finalize_infallible_message<Contract, R>(
contract: &Contract,
config: ExecuteMessageConfig,
result: &R,
) -> Result<(), DispatchError>
where
Contract: SpreadLayout,
R: scale::Encode + 'static,
{
// Only write storage back if the message could have mutated it.
if config.mutates {
let root_key = Key::from([0x00; 32]);
push_spread_root::<Contract>(contract, &root_key);
}
if config.dynamic_storage_alloc {
alloc::finalize();
}
if TypeId::of::<R>() != TypeId::of::<()>() {
// `return_value` never returns (diverges), so this call ends execution.
// It is skipped entirely when `R` is `()`, in which case we fall
// through to `Ok(())` below without emitting a value.
ink_env::return_value::<R>(ReturnFlags::default(), result)
}
Ok(())
}
/// Finalizes a failed ink! message call by emitting the encoded error
/// result and signalling the host to revert the transaction.
///
/// This function never returns.
#[inline]
fn finalize_fallible_message<R>(result: &R) -> !
where
    R: scale::Encode + 'static,
{
    // No storage is pushed back: the reverted transaction discards any
    // intermediate contract state anyway.
    let flags = ReturnFlags::default().set_reverted(true);
    ink_env::return_value::<R>(flags, result)
}
| initialize_contract | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.