| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:30:29.924754Z" |
| }, |
| "title": "Plan Explanations that Exploit a Cognitive Spatial Model", |
| "authors": [ |
| { |
| "first": "Raj", |
| "middle": [], |
| "last": "Korpan", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "City University of New York", |
| "location": {} |
| }, |
| "email": "rkorpan@gradcenter.cuny.edu" |
| }, |
| { |
| "first": "Susan", |
| "middle": [ |
| "L" |
| ], |
| "last": "Epstein", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "City University of New York", |
| "location": {} |
| }, |
| "email": "susan.epstein@hunter.cuny.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Ideally, people who navigate together in a complex indoor space share a mental model that facilitates explanation. This paper reports on a robot control system whose cognitive world model is based on spatial affordances that generalize over its perceptual data. Given a target, the control system formulates multiple plans, each with a model-relevant metric, and selects among them. As a result, it can provide readily understandable natural language about the robot's intentions and confidence, and generate diverse, contrastive explanations that reference the acquired spatial model. Empirical results in large, complex environments demonstrate the robot's ability to provide human-friendly explanations in natural language.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Ideally, people who navigate together in a complex indoor space share a mental model that facilitates explanation. This paper reports on a robot control system whose cognitive world model is based on spatial affordances that generalize over its perceptual data. Given a target, the control system formulates multiple plans, each with a model-relevant metric, and selects among them. As a result, it can provide readily understandable natural language about the robot's intentions and confidence, and generate diverse, contrastive explanations that reference the acquired spatial model. Empirical results in large, complex environments demonstrate the robot's ability to provide human-friendly explanations in natural language.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Inspired by recent recommendations for spoken language interaction with robots (Marge et al., 2020) , this paper introduces WHY, an approach to communicate a robot's planning rationales, intentions, and confidence in human-friendly spatial language. Our thesis is that a plan based on spatial representations acquired from travel experience can ground its objectives and support explainable path planning. The principal results of this paper are empirical demonstrations of WHY's ability to explain and contrast plans in readily-understandable natural language.", |
| "cite_spans": [ |
| { |
| "start": 79, |
| "end": 99, |
| "text": "(Marge et al., 2020)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Given sensor data and a metric map (e.g., a floor plan), the task of our autonomous robot navigator is to travel to target locations in a large, complex, human-centric, indoor space (henceforward, world) . The robot's control system integrates acquired spatial knowledge into a cognitively-based architecture that combines planning with reactivity, heuristics, and situational reasoning. Given a target, the control system creates a plan, a sequence of intermediate locations (waypoints) to reach it. This plan is expected to balance multiple objectives, combine continuous and discrete spatial representations, and encourage a human's trust.", |
| "cite_spans": [ |
| { |
| "start": 182, |
| "end": 203, |
| "text": "(henceforward, world)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Traditional navigation planners use a cost graph (also known as a costmap) where each node is a point in unobstructed space and each edge connects a pair of nodes with a weight for the cost to move between them. A popular cost graph is based on an occupancy grid, uniform square cells superimposed on a two-dimensional metric map. Each edge in the graph represents two adjacent unobstructed cells, labeled with the Euclidean distance between their centers. In a fine-grained grid, however, optimal planners (e.g., A* (Hart et al., 1968) ) hug obstacles so tightly that their plans require tight maneuvers to reach some waypoints and may fail as actuator and sensor errors accumulate near them.", |
| "cite_spans": [ |
| { |
| "start": 517, |
| "end": 536, |
| "text": "(Hart et al., 1968)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To bias plans toward its particular objective (a spatial representation or commonsense rationale), a planner modifies the weights in its own copy of the occupancy-grid graph. The fixed underlying graph structure allows our approach to evaluate a plan within any such modified graph. Voting then selects the plan that best satisfies all the objectives. This approach facilitates contrastive natural-language explanations of the chosen plan with respect to each objective. The control system reports on its beliefs, intentions, and confidence with spatial language. For example, \"Although there may be another way that is somewhat shorter, I think my way is a lot better at going through open areas.\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The next sections provide related work and describe the acquired spatial model. Subsequent sections cover the modified graphs, vote-based planning, and how WHY explains plans. The last sections describe empirical results and future work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "A spatial representation of its world is essential to a robot control system that navigates efficiently and explains its behavior clearly. Grounded communication between a robot and a person, however, requires a shared spatial representation. This section first describes work on human cognitive maps that inspired our control system's spatial model. It then details approaches that describe and explain the robot's behavior.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "A cognitive map is a compact, mental spatial representation of a world, built by a person as she moves through that world (Golledge, 1999) . To reduce her cognitive load, a person reasons from a cognitive map that incorporates landmarks, route knowledge, and survey knowledge (Tversky, 1993) . Landmarks represent locations in the map, routes represent lines that connect them, and survey knowledge captures spatial relations. Although it has been suggested that cognitive maps use metric distances and angles (Gallistel, 1990) , more recent work indicates that cognitive maps have a nonmetric, qualitative topological structure (Foo et al., 2005) . Other recent work suggests that people use a cognitive graph with labeled metric information that captures connectivity and patterns (Chrastil and Warren, 2014; Warren et al., 2017 ).", |
| "cite_spans": [ |
| { |
| "start": 122, |
| "end": 138, |
| "text": "(Golledge, 1999)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 276, |
| "end": 291, |
| "text": "(Tversky, 1993)", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 510, |
| "end": 527, |
| "text": "(Gallistel, 1990)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 629, |
| "end": 647, |
| "text": "(Foo et al., 2005)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 783, |
| "end": 810, |
| "text": "(Chrastil and Warren, 2014;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 811, |
| "end": 830, |
| "text": "Warren et al., 2017", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "An affordance is a characteristic of the world that enables the execution of some action (Gibson, 1977) . Affordance-based theories of spatial cognition posit a tight relationship between the specific dynamics of a world and the decisions made by an individual there (Fajen and Phillips, 2013). Here, a spatial affordance is an abstract representation of the world that facilitates navigation. This paper introduces path planning in cost graphs based on acquired spatial affordances. People generalize structured representations across domains on similar tasks (Pouncy et al., 2021) much the way the spatial model described here generalizes affordances for use in different worlds.", |
| "cite_spans": [ |
| { |
| "start": 89, |
| "end": 103, |
| "text": "(Gibson, 1977)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 561, |
| "end": 582, |
| "text": "(Pouncy et al., 2021)", |
| "ref_id": "BIBREF31" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "A control system can learn and use a cognitive map of its world for robot navigation. For example, the Spatial Semantic Hierarchy (SSH) modeled a cognitive map with hierarchical metric and topological representations (Kuipers, 2000) . Although SSH's cognitive map bears some similarity to the one used here, it did not explain plans. Other approaches used semantics to create a meaningfullylabeled metric map (Kostavelis and Gasteratos, 2015) . While these maps provide a qualitative context in which to ground a controller's language, they do not necessarily align with human cognitive maps. Moreover, control systems often use semantic maps for communication but another representation for reasoning and decision-making. Instead, this paper shows how a single, affordance-based representation supports all of those processes.", |
| "cite_spans": [ |
| { |
| "start": 217, |
| "end": 232, |
| "text": "(Kuipers, 2000)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 409, |
| "end": 442, |
| "text": "(Kostavelis and Gasteratos, 2015)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Indoors, an autonomous robot may interact with people as it navigates to its target. A human collaborator is more likely to accept, trust, and understand a robot that can explain its behavior (Rosenfeld and Richardson, 2019) . Rather than describe an event or summarize its causes, an explanation compares counterfactual cases, includes causes selectively, and recognizes people as social beings with beliefs and intentions (Miller, 2019) . A contrastive explanation compares the reason for a decision to another plausible rationale (Hoffmann and Magazzeni, 2019) . Human subjects generally prefer such explanations that focus on the difference between the robot's planned route and their own (e.g., \"my route is shorter, but overlaps more and produces less reward\") (Perelman et al., 2020).", |
| "cite_spans": [ |
| { |
| "start": 192, |
| "end": 224, |
| "text": "(Rosenfeld and Richardson, 2019)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 424, |
| "end": 438, |
| "text": "(Miller, 2019)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 533, |
| "end": 563, |
| "text": "(Hoffmann and Magazzeni, 2019)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Detailed technical logs of a robot's experience were originally available only to trained researchers (Landsiedel et al., 2017; Scalise et al., 2017) . Recent work, however, has generated natural language descriptions of a robot's travelled path from them. These focus on abstraction, specificity, and locality Perera et al., 2016) or on sentence correctness, completeness, and conciseness (Barrett et al., 2017) . All, however, required a labeled dataset or a semantic map. Other recent work partitions a plan into actions and uses language templates to generate descriptions of each action in the context of a collaborating robot team (Singh et al., 2021) . WHY focuses on explanations for the reasons behind the robot's decisions rather than descriptions of the robot's behavior.", |
| "cite_spans": [ |
| { |
| "start": 102, |
| "end": 127, |
| "text": "(Landsiedel et al., 2017;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 128, |
| "end": 149, |
| "text": "Scalise et al., 2017)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 311, |
| "end": 331, |
| "text": "Perera et al., 2016)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 390, |
| "end": 412, |
| "text": "(Barrett et al., 2017)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 637, |
| "end": 657, |
| "text": "(Singh et al., 2021)", |
| "ref_id": "BIBREF38" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "To produce explanations, others have selected potentially suboptimal plans (Fox et al., 2017; Chakraborti et al., 2019) or readily understandable behaviors (Huang et al., 2019), or relied on classical planning (Magnaguagno et al., 2017; Grea et al., 2018; Krarup et al., 2019) or on logic (Seegebarth et al., 2012; Nguyen et al., 2020) . None of that work, however, explains in natural language. The approach closest to the one presented here provides contrastive explanations for multi-objective path planning in natural language as a Markov decision process (Sukkerd et al., 2020) , but considers fewer objectives, requires a hand-labeled map, and has been evaluated only in much smaller worlds.", |
| "cite_spans": [ |
| { |
| "start": 75, |
| "end": 93, |
| "text": "(Fox et al., 2017;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 94, |
| "end": 119, |
| "text": "Chakraborti et al., 2019)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 210, |
| "end": 236, |
| "text": "(Magnaguagno et al., 2017;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 237, |
| "end": 255, |
| "text": "Grea et al., 2018;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 256, |
| "end": 276, |
| "text": "Krarup et al., 2019)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 289, |
| "end": 314, |
| "text": "(Seegebarth et al., 2012;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 315, |
| "end": 335, |
| "text": "Nguyen et al., 2020)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 560, |
| "end": 582, |
| "text": "(Sukkerd et al., 2020)", |
| "ref_id": "BIBREF39" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The context of this work is SemaFORR, a cognitively-based control system for autonomous indoor navigation (Epstein et al., 2015; ", |
| "cite_spans": [ |
| { |
| "start": 106, |
| "end": 128, |
| "text": "(Epstein et al., 2015;", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Spatial affordances", |
| "sec_num": "3" |
| }, |
| { |
| "text": "d = x, y, \u03b8, V ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Spatial affordances", |
| "sec_num": "3" |
| }, |
| { |
| "text": "SemaFORR records the robot's location (x, y), its orientation \u03b8, and its view V , the data from its onboard range finder. After each target, SemaFORR identifies spatial affordances for its acquired model of freespace, the unobstructed areas in a world. The model can be used alone or with a metric map. At decision point d, SemaFORR learns a region, a circle in freespace with center at (x, y) and radius equal to the minimum distance reported by V . Accumulated contradictory or overlapping regions are resolved after each target. An exit represents access to freespace, a point where the robot's path once crossed the region's perimeter. A door is an arc on a region's perimeter, a continuous generalization of finitely many, relatively close exits between its endpoints. Figure 1 (a) shows acquired regions with exits and doors (drawn for clarity as secants to their respective arcs). Although regions approximate what appear to be rooms in the figure, they record only freespace, not walls.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 774, |
| "end": 782, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Spatial affordances", |
| "sec_num": "3" |
| }, |
| { |
| "text": "A trail is a refined version of the robot's path toward its target. The algorithm that creates trails heuristically smooths the robot's paths and eliminates digressions. The remaining (usually far fewer) decision points are trail markers. As in Figure 1 (b), the sequence of line segments defined by consecutive trail markers is typically more direct than the original path, but rarely optimal. A conveyor is a freespace cell in a 2 \u00d7 2m grid super- Figure 1 (c). A hallway represents well-travelled routes in some angular direction (vertical, horizontal, major diagonal, or minor diagonal). A hallway generalizes line segments between consecutive decision points to find relatively straight, narrow, continuous freespace with both length and width. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 245, |
| "end": 253, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 450, |
| "end": 458, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Spatial affordances", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Planning for navigation requires a graphical representation of the world's freespace. To produces an optimal plan, A* searches a cost graph G based on an occupancy grid with edge weights for Euclidean distance. SemaFORR constructs a set of graphs; each begins with G but modifies its edge weights to align with a particular objective. This biases search toward that objective but still considers plan length. In practice, an occupancy grid should be sufficiently fine to represent obstacles accurately. Table 1 lists SemaFORR's planners and their objectives. Given a target, each planner formulates its own plan to reach it, one biased toward its own objective. Two planners focus on commonsense: FASTP searches the original G, but SAFEP increases G's edge weights based on an edge's proximity to obstacles. Two others focus on exploration to acquire more knowledge about their world. EXPLOREP creates a grid that tallies how frequently the robot's path history passes through each cell, and uses those values to increase edge weights where it has already traveled. Because the acquired spatial model summarizes experience more compactly than a path, NOVELP explores areas not covered by the model. It increases a weight if the edge overlaps an acquired affordance.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 503, |
| "end": 510, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Modified cost graphs", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Four planners exploit a particular kind of spatial affordance with changes to edge weights. (Values based on preliminary testing bias plans to pursue but not overemphasize affordances.) REGIONP's cost graph modifies each edge's weight w based on the location of its endpoints. If both lie in the same region, w goes unchanged; if neither lies in a region w becomes 10w. Otherwise, for the one endpoint v not in a region, w becomes 1.5w if v is within 0.5m of a door and an exit, 1.75w if v is within 0.5m of a door or an exit, and otherwise 2w. This biases plans to pass through regions because it increases edge costs outside them.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Modified cost graphs", |
| "sec_num": "4" |
| }, |
| { |
| "text": "HALLWAYP and TRAILP modify their weights similarly, with respective conditions \"lie in one hallway\" and \"lie within 0.5m of a trail marker.\" If both endpoints of an edge meet the condition, w goes unchanged; if neither does, w becomes 10w. Otherwise, when just one endpoint meets the condition, w becomes 1.5w. To bias plans toward high-count conveyors, CONVEYP considers the counters c 1 and c 2 for the cells where the endpoints of an edge with weight w lies. If both are non-zero, w becomes w + 2/(c 1 + c 2 ); otherwise, w becomes 10w.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Modified cost graphs", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Because SemaFORR's spatial model focuses on freespace, these modified cost graphs allow a robot control system to encourage travel there but also incorporate the metric cost graph where the model lacks knowledge. The region-based cost graph, for example, imposes relatively lower costs only for doors and exits that the robot has successfully exploited earlier, and thus prioritizes them. Because weights only increase, Euclidean distance remains an admissible heuristic for A*, that is, it never overestimates the actual cost to the target's location.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Modified cost graphs", |
| "sec_num": "4" |
| }, |
| { |
| "text": "To choose paths, people use many different objectives that reflect their motivation (Golledge, 1999) . A cognitively-based robot navigator should also incorporate and balance a variety of path-selection heuristics. SemaFORR's planners can be used together because they originate from the same cost graph. This section explains Algorithm 1, pseudocode for how voting balances the planners' objectives to select a plan.", |
| "cite_spans": [ |
| { |
| "start": 84, |
| "end": 100, |
| "text": "(Golledge, 1999)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Voting among planners", |
| "sec_num": "5" |
| }, |
| { |
| "text": "SemaFORR constructs multiple plans that optimize a single objective and then uses voting to select the plan that maximally satisfies the most objectives. First, each planner j constructs an op-Algorithm 1: Voting-based planning Input: planners J, spatial model M , basic cost graph G for each planner j \u2208 J do Set j's cost graph G j to a copy of G Modify G j 's weights based on j and M With A*, find optimal plan P j in G j for each planner j \u2208 J do for each planner i \u2208 J do", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Voting among planners", |
| "sec_num": "5" |
| }, |
| { |
| "text": "C ij \u2190 cost of plan P i in G j", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Voting among planners", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Normalize plan scores C ij in [0, 10] for each plan P i do Score i \u2190 J j=1 C ij best \u2190 argmin i Score i return P best timal plan P j for its objective as a sequence of waypoints in its modified cost graph G j . This guarantees that each submitted plan is optimal for at least one objective.", |
| "cite_spans": [ |
| { |
| "start": 30, |
| "end": 33, |
| "text": "[0,", |
| "ref_id": null |
| }, |
| { |
| "start": 34, |
| "end": 37, |
| "text": "10]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Voting among planners", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Next, each planner's objective is used to evaluate every plan. All the cost graphs have the same nodes and edges, so to evaluate planner i's plan P i from the perspective of planner j, SemaFORR simply sums the edge weights in G j for the sequence of edges specified by P i . The resultant scores C ij are then normalized in [0, 10] for each j. SemaFORR seeks to minimize its objectives. Thus a C ij value near 0 indicates that plan P i closely conforms to objective j, while a score near 10 indicates that plan P i conflicts with objective j. Voting selects the plan with the lowest total score across all objectives and breaks ties at random.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Voting among planners", |
| "sec_num": "5" |
| }, |
| { |
| "text": "SemaFORR uses WHY to explain its long-range perspective in natural language. WHY exploits differences among planners' objectives to produce clear, concise, contrastive explanations for a plan quickly. WHY assumes that the robot's human companion seeks a shortest-length plan, and compares that to SemaFORR's plan. Although we assume here that a goal-directed human navigator would seek to minimize travel distance, another objective, including those in Table 1 , could label the foundational cost graph G instead.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 453, |
| "end": 460, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Contrastive explanations", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Throughout this section, N represents a function that translates its argument (a planner or a metric value) into natural language. Given a real-valued Figure 2 : WHY compares FASTP's (red) plan to TRAILP's (blue) one biased by SemaFORR's (green) trails. It explains, \"Although there may be another way that is a lot shorter, I think my way is a lot better at following ways we've gone before.\" metric m for some aspect (e.g., confidence or enthusiasm) of the decision process, M(m) bins m's value into an ordered partition of m's range and N (M(m)) translates that bin to a natural language phrase. For example, m could measure the desire to select one plan over the others, and the value partition could distinguish a strong preference for that plan from a weak one. Thus, if m \u2208 (0, +\u221e) were partitioned as {(0, 5), [5, +\u221e)}, N (m < 5) could be \"a little\" and N (m \u2265 5) \"a lot.\" This allows WHY to hedge in its responses, much the way people explain their reasoning when they are uncertain (Markkanen and Schr\u00f6der, 1997) .", |
| "cite_spans": [ |
| { |
| "start": 992, |
| "end": 1022, |
| "text": "(Markkanen and Schr\u00f6der, 1997)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 151, |
| "end": 159, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Contrastive explanations", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Human and robot plans to reach the same target may differ because they lack a common objective. WHY's response to this question presumes that a human plans from one perspective, objective \u03b2 H , while the robot plans from another perspective, objective \u03b2 R . Explanations for a plan assume a human has an alternative objective. Henceforward, \u03b2 H is \"take the shortest path.\" WHY models the human questioner with \u03b2 H to produce plan P H , a prediction of the human's implicit plan. Algorithm 2 is pseudocode for WHY's plan-explanation procedure. WHY takes as input the robot's plan P R and objective \u03b2 R , and the alternative plan P H and objective \u03b2 H it attributes to the human questioner. \u03b2 H (P ) measures plan length and \u03b2 R (P ) measures plan cost in P R 's graph. In the running example shown in Figure 2 , WHY ex-Algorithm 2: Explanation procedure Input: planning objectives \u03b2 R and \u03b2 H , plans P R and P H Output: explanation", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 801, |
| "end": 809, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Why does your plan go this way?", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "D R = \u03b2 R (P R ) \u2212 \u03b2 R (P H ) D H = \u03b2 H (P R ) \u2212 \u03b2 H (P H ) switch mode(D R , D H ) do", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Why does your plan go this way?", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "case D R = D H = 0 do explanation \u2190 sentence based on template for equivalent plans case D R < 0 and D H > 0 do explanation \u2190 sentence for \u03b2 R , \u03b2 H case D R < 0 and D H = 0 do explanation \u2190 sentence for \u03b2 R return explanation plains SemaFORR's preference for its plan P R from TRAILP where \u03b2 R is TRAILP's objective (\"exploit trail markers\"). WHY translates \u03b2 H and \u03b2 R with Table 2 as \"short\" and \"follows ways we've gone before,\" respectively.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 376, |
| "end": 383, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Why does your plan go this way?", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "If voting selected the plan constructed by FASTP (i.e., the shortest-length plan), then Why responds with \"I decided to go this way because I agree that we should take the shortest route.\" Otherwise, to compare P R with P H , WHY calculates their difference from two perspectives: D H from the human's perspective (e.g., length), and D R from the robot's perspective (e.g., proximity to trails). WHY places these differences in user-specified bins that represent a human perspective on the objectives. Table 3 provides language for these differences.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 502, |
| "end": 510, |
| "text": "Table 3", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Why does your plan go this way?", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "The relative size of the differences determines an applicable template. If both D H and D R , as defined in Algorithm 2, are 0, then the plans equally address the two objectives, and WHY explains \"I decided to go this way because I think it's just as N (\u03b2 H ) and equally N (\u03b2 R ).\" Otherwise, the plans differ with respect to one or both objectives. If D R is negative (e.g., P R is more aligned with trails), then WHY instantiates this template: 1: Although there may be another way that is", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Why does your plan go this way?", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "N (M(D H )) N * (\u03b2 H ), 2: I think my way is N (M(D R )) N * (\u03b2 R ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Why does your plan go this way?", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "where N * (\u03b2) is a comparator for \u03b2 (e.g., \"shorter\" or \"better at following ways we've gone before\"). For example, \"Although there may be another way that is somewhat shorter, I think my way is a lot better at following ways we've gone before.\" WHY omits line 1 in the template if D H = 0. Other cases, Planner", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Why does your plan go this way?", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "Intervals M(D) N (M(D)) (0, 1] a bit FASTP (1, 10] somewhat (10, +\u221e) a lot (\u2212\u221e, \u2212a] a lot All others (\u2212a, \u2212b] somewhat (\u2212b, +\u221e) a bit", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Why does your plan go this way?", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "where D H < 0 or D R > 0 cannot occur because each planner is optimal with respect to its own cost graph and objective, as described in Section 5.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Why does your plan go this way?", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "WHY also addresses the question \"Why do you prefer your plan?\" Unlike the previous response, which contrasted the human's objective with the robot's, this response has the robot explain its objective. If voting selects the FASTP plan, which the robot assumes has the same objective as its human companion, WHY would respond \"Actually, I agree that we should take the shortest route.\" Otherwise, WHY uses the differences D H and D R from Algorithm 2. If they are both 0, then WHY replies, \"I think both plans are equally good.\" Otherwise, WHY responds with the template \"I prefer my plan", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Why do you prefer your plan?", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "because it's N (M(D R )) N * (\u03b2 R )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Why do you prefer your plan?", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": ".\" For example, to explain why SemaFORR chose TRAILP's plan, WHY might say \"I prefer my plan because it's a lot better at following ways we've gone before.\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Why do you prefer your plan?", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "6.3 What's another way we could go? Figure 3 shows an example where WHY responds to \"What's another way we could go?\" Because WHY has access to two plans from SemaFORR Figure 3 : Acquired conveyors in green, with darker higher-count cells. Voting chose CONVEYP's (blue) plan which is drawn to high-count cells. In response to \"What's another way we could go?\" WHY compares the conveyor plan with FASTP's (red) plan: \"We could go that way since it's a bit shorter but it could also be a bit worse at going through well-traveled areas.\"", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 36, |
| "end": 44, |
| "text": "Figure 3", |
| "ref_id": null |
| }, |
| { |
| "start": 168, |
| "end": 176, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Why do you prefer your plan?", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "(P R and P H ), it can provide P H , the shortest-path plan, as the alternative plan in response. If voting selects the FASTP plan, which uses the same objective as the robot's human companion, then WHY responds \"Yours is the best way to go.\" Otherwise, it instantiates the template: \"We could go your way since it's", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Why do you prefer your plan?", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "N (M(D H )) N * (\u03b2 H ) but it could also be N (M(D R )) N (\u03b2 R )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Why do you prefer your plan?", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": ".\" Here N denotes an opposite comparator (e.g., \"longer\" or \"worse at following ways we've gone before\"). For example, an explanation is \"We could go that way since it's somewhat shorter but it could also be a lot worse at following ways we've gone before.\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Why do you prefer your plan?", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "6.4 How sure are you about your plan?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Why do you prefer your plan?", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "In response to \"How sure are you about your plan?\" WHY explains its confidence that P R meets its objective. Figure 4 shows an example. WHY uses the language for M(D R ) and M(D H ) from Table 3 to extract a value C = N (M(D R , D H )) from Table 4 . WHY then instantiates \"I'm N (C) sure because\" followed by line C: Figure 4 : Highlighted sections of FASTP's (red) plan and TRAILP's (blue) plan to follow acquired (green circle) trail markers. WHY explains \"I'm really sure because my plan is a lot better at following ways we've gone before and only a bit longer than your plan.\" Table 4 : Table 3 . Here, 1 denotes \"really,\" 2 = \"only somewhat,\" and 3 = \"not.\"", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 109, |
| "end": 117, |
| "text": "Figure 4", |
| "ref_id": null |
| }, |
| { |
| "start": 187, |
| "end": 194, |
| "text": "Table 3", |
| "ref_id": "TABREF2" |
| }, |
| { |
| "start": 241, |
| "end": 249, |
| "text": "Table 4", |
| "ref_id": null |
| }, |
| { |
| "start": 319, |
| "end": 327, |
| "text": "Figure 4", |
| "ref_id": null |
| }, |
| { |
| "start": 584, |
| "end": 591, |
| "text": "Table 4", |
| "ref_id": null |
| }, |
| { |
| "start": 594, |
| "end": 601, |
| "text": "Table 3", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Why do you prefer your plan?", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "Language N (M(D R , D H )) for confidence compares M(D R ) and M(D H ) from", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Why do you prefer your plan?", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "N (M(D H )) N (M(D R )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Why do you prefer your plan?", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": ") \"a lot\" \"somewhat\" \"a bit\" \"a lot\" 2 1 1 \"somewhat\" 3 2 1 \"a bit\" 3 3 2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Why do you prefer your plan?", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "1: my plan is N (M(D R )) N * (\u03b2 R ) and only N (M(D H )) N (\u03b2 H ) than yours. 2: even though my plan is N (M(D R )) N * (\u03b2 R ), it is also N (M(D H )) N (\u03b2 H ) than yours. 3: my plan is N (D H ) N (\u03b2 H ) and only N (D R ) N * (\u03b2 R ) than yours 6", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Why do you prefer your plan?", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": ".5 How are we getting there?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Why do you prefer your plan?", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "\"How are we getting there?\" shows a human companion's uncertainty about the route planned to reach their shared target. Rather than reference the planner's objective, WHY treats this as a request for a high-level description of P R itself, and uses the segments between consecutive waypoints in SemaFORR's plan P R to produce natural language that describes it. Figure 5 shows an example. WHY anticipates travel with P R as an ordered sequence of locations from the robot's current location through P R 's waypoints and then to the target. First, WHY forms plan segments from consecutive locations in P R and computes each segment's length and angular direction \u03c7 (based on the angle between its endpoints relative to a fixed horizontal axis). It then bins \u03c7 within an interval M(\u03c7) and assigns a label N (M(\u03c7)) as shown in Table 5 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 362, |
| "end": 370, |
| "text": "Figure 5", |
| "ref_id": null |
| }, |
| { |
| "start": 824, |
| "end": 831, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Why do you prefer your plan?", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "These labels are allocentric, and therefore less Figure 5 : SemaFORR's FASTP plan with 92 waypoints from the robot to its target. WHY explains in 9 clauses, \"We will go straight about 20 meters, turn right a little, go straight about 4 meters, turn left a little, go straight about 20 meters, turn left a little, go straight about 8 meters, turn left a little, and go straight about 4 meters to reach our target.\" Table 5 : Labels N (M(\u03c7)) for segment angle intervals M(\u03c7). Language N (\u03b1) adjusts the change in consecutive angular directions for full 2\u03c0 rotation: 7\u03c0 8 ) 8 6 turn right otherwise 1 7 turn right a little appropriate indoors. WHY translates them to an egocentric frame of reference, as if the robot and its companion faced the same way along the intended route. The change in consecutive N (M(\u03c7)) labels represents the change in direction from one path segment to the next. N (\u03b1) is language for \u03b1, the angular change in \u03c7 from one segment to the next. For example, if the first segment in P R were labeled 2 and the second segment labeled 7, then \u03b1 = 5 which Table 5 translates as \"turn hard right.\" Plan P R now has a sequence of phrases for the points where two consecutive segments meet. WHY inserts a \"go straight\" after each \"turn\" phrase. WHY then summarizes consecutive \"go straight\" phrases into a single one (since they indicate no change in direction) with a length L, the sum of the lengths of the segments that induced it. These Ls are binned into intervals and reported in natural language (e.g., 5.7m lies in (4, 6] with language \"about 6 meters\").", |
| "cite_spans": [ |
| { |
| "start": 564, |
| "end": 566, |
| "text": "7\u03c0", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 49, |
| "end": 57, |
| "text": "Figure 5", |
| "ref_id": null |
| }, |
| { |
| "start": 414, |
| "end": 421, |
| "text": "Table 5", |
| "ref_id": null |
| }, |
| { |
| "start": 1075, |
| "end": 1082, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Why do you prefer your plan?", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "\u03b1 = N (M(\u03c7 k )) \u2212 N (M(\u03c7 k\u22121 ))) mod 8. M(\u03c7) N (M(\u03c7)) \u03b1 Phrase N (\u03b1) [ \u22127\u03c0 8 , \u22125\u03c0 8 ) 2 0 go straight [ \u22125\u03c0 8 , \u22123\u03c0 8 ) 3 1 turn left a little [ \u22123\u03c0 8 , \u2212\u03c0 8 ) 4 2 turn left [ \u2212\u03c0 8 , \u03c0 8 ) 5 3 turn hard left [ \u03c0 8 , 3\u03c0 8 ) 6 4 turn around [ 3\u03c0 8 , 5\u03c0 8 ) 7 5 turn hard right [ 5\u03c0 8 ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Why do you prefer your plan?", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "WHY combines the list of phrases and lengths appropriately to form a succinct explanation with the template \"We will [N (\u03b1) {about N (M(L))},] to reach our target.\" It repeats the material in square brackets for each N (\u03b1), and includes the material in curly brackets only when N (\u03b1) is \"go straight.\" In summary, WHY produces natural explanations for a robot's plan as it travels through a complex world. These explanations are essential for human-friendly autonomous indoor navigation and require an assumption about its human collaborator's objective. Our approach explains the robot's plan, responds to questions about alternatives, and expresses a human-friendly level of confidence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Why do you prefer your plan?", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "SemaFORR with WHY is evaluated on three challenging real worlds: M5, H10, and G5. M5 is the fifth floor of New York's Museum of Modern Art. It is 54 \u00d7 62m and has 1585m 2 freespace. H10 is the 89 \u00d7 58m tenth floor of an academic building with 2627m 2 of freespace and 75 rooms. G5 is the 110 \u00d7 70m fifth floor of a renovated Manhattan building. G5 has about 4021m 2 of freespace, 180 rooms, and many intersecting hallways. It is known for its ability to perplex human navigators, despite color-coded walls and art introduced as landmarks. All testing was in simulation with ROS, the state-of-the-art robot operating system (Quigley et al., 2009) . MengeROS manages the simulation and deliberately introduces error into both the sensor data and action execution (Aroor et al., 2017) .", |
| "cite_spans": [ |
| { |
| "start": 623, |
| "end": 645, |
| "text": "(Quigley et al., 2009)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 761, |
| "end": 781, |
| "text": "(Aroor et al., 2017)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Empirical Evaluation", |
| "sec_num": "7" |
| }, |
| { |
| "text": "To evaluate WHY we randomly sampled 5 sequences of 40 targets in each world's freespace. Table 6 reports how often voting selected each planner's submission. Two-thirds of the selected plans were based on a modified cost graph, about half of them biased by SemaFORR's spatial model. Because SemaFORR revises its model incrementally, as the robot addresses more targets, it begins to value EXPLOREP's plans less than model-based ones. For example, by the second 20 targets in each sequence of 40, plans based on the spatial model were chosen 8.2% more often, and EXPLOREP's plans 5.4% less often. No plan from NOVELP was ever selected because its plans typically performed poorly in the four affordance-based graphs. Voting, however, included NOVELP to preserve a potential trade-off between exploration and exploitation. We evaluated WHY for its efficiency (average computation time) and diversity (number of unique explanations produced in response to each question). We also calculated the understandability of these explanations by average reading grade level, as measured by the Coleman-Liau index (CLI) (Coleman and Liau, 1975) . Since WHY's goal is to produce explanations for non-experts, lower grade-level scores are more desirable. While one could manipulate the templates to improve these scores, CLI provides a method to compare the complexity of responses to one another. Table 7 analyzes WHY's answers to all 3000 (5 questions \u2022 40 targets \u2022 5 sequences \u2022 3 worlds) questions. Its distinct natural explanations simulate people's ability to vary explanations based on context (Malle, 1999) . WHY averaged 10.4 msec to compute explanations for all five questions about each plan. WHY's approach is also nuanced, with many unique responses per question. For example, WHY produced 49 unique responses to \"Why does your plan go this way?\" out of the 92 possible instantiations of the template. The CLI gauged them at about a sixth-grade reading level, readily understandable to a layperson.", |
| "cite_spans": [ |
| { |
| "start": 1108, |
| "end": 1132, |
| "text": "(Coleman and Liau, 1975)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 1588, |
| "end": 1601, |
| "text": "(Malle, 1999)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 89, |
| "end": 96, |
| "text": "Table 6", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 1384, |
| "end": 1391, |
| "text": "Table 7", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Empirical Evaluation", |
| "sec_num": "7" |
| }, |
| { |
| "text": "To capture useful spatial affordances for its world model, SemaFORR generalizes over its percepts, the 660 distances to the nearest obstacle that its range finder reports 15 times per second. Each of SemaFORR's planners generates paths in a graph biased by edge weights that represent that planner's objective but share an underlying structure that facilitates plan comparison. Voting guarantees that any selected plan will be optimal with respect to at least one objective, and makes it likely that the plan will also perform well with respect to the others. This also facilitates contrastive explanations in natural spatial language for the robot's planning objectives, alternative paths, and confidence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "8" |
| }, |
| { |
| "text": "How a robot control system represents knowledge is integral to natural communication between robots and people, especially in a spatial context. Misunderstandings between a robot and a human often arise from a discrepancy between their spatial mental models. This prompts questions about the robot's underlying decision-making and reasoning mechanisms. WHY's explanations rely on SemaFORR's cognitive underpinnings. Language about the spatial model is readily understood because SemaFORR interprets its percepts much the way people do. SemaFORR's freespace affordances were inspired by sketches after human subjects had actively explored complex virtual worlds (Chrastil and Warren, 2013). The planners' objectives are also analogous to processes empirically identified in people (H\u00f6lscher et al., 2009) . The results here demonstrate that natural language communication with robots benefits substantially when a robot's control system and a human have similar cognitively-based spatial representations. WHY's templates flexibly and quickly produce many different explanations in natural language. The templates focus language generation on SemaFORR's computational rationale rather than on linguistic structure and grammar. They also facilitate the introduction of new planners without the need to retrain a language generator for a new planning objective. For example, an objective that relied on landmarks could modify the cost graph to reduce costs near them, so that WHY might explain \"I think my way is a lot better at following landmarks.\" Although WHY assumes the human's objective is the shortest path, it can easily substitute any objective representable in a cost graph with an admissible heuristic. SemaFORR could also incorporate a planning objective learned from external demonstration (e.g., inverse reinforcement learning) if that objective were representable as increments to the cost graph's weights.", |
| "cite_spans": [ |
| { |
| "start": 780, |
| "end": 803, |
| "text": "(H\u00f6lscher et al., 2009)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "8" |
| }, |
| { |
| "text": "Whenever SemaFORR selects FASTP's plan here, it assumes that it shares the human's objective. Any questions about the robot's plan necessarily challenge that assumption. Presumably, the person asks because they do not recognize their objective there. WHY responds by agreement that the person's plan is the correct way to go (e.g., \"Actually, I agree that we should take the shortest route.\"), even though the question should not have arisen. Another way to address this would be to offer an alternative plan when FASTP is selected.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "8" |
| }, |
| { |
| "text": "Our current work examines how well human subjects understand and feel comfortable with WHY. Although SemaFORR's parameters for intervals (e.g., in Table 3 ) were chosen for G5 and also worked well in other worlds, human subjects evaluation will allow us to confirm or reassess these values. Human-subject studies could also help refine WHY's explanations and incorporate psychophysics and proxemics.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 147, |
| "end": 154, |
| "text": "Table 3", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "8" |
| }, |
| { |
| "text": "Future work could extend WHY for dialogue (e.g., to clarify confusion or guide navigation (Roman et al., 2020) ). This could incorporate natural language generation with deep learning and facilitate queries to the person. WHY presumes that questions arise from a difference between the human's and the robot's objectives, but they could also stem from a violation of the shared target assumption. A broader system for human-robot collaboration would seek the cause of such a mismatch, use plan explanations to resolve it, and then allow the robot to adjust its responses based on feedback from its human partner. For example, given a plan P from a person or an unspecified heuristic planner, WHY could use the individual objectives in its repertoire to tease apart and then characterize how P weighted its objectives (e.g., \"So distance is more important than travel time?\").", |
| "cite_spans": [ |
| { |
| "start": 90, |
| "end": 110, |
| "text": "(Roman et al., 2020)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "8" |
| }, |
| { |
| "text": "Meanwhile, SemaFORR's cognitively-based spatial model supports important path planning objectives and human-friendly explanations of its behavior, intentions, and confidence. Empirical results in three large, complex, realistic worlds show that our approach produces diverse, understandable contrastive explanations in natural language.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "8" |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work was supported in part by The National Science Foundation under CNS-1625843. The authors thank Anoop Aroor for MengeROS.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "MengeROS: A Crowd Simulation Tool for Autonomous Robot Navigation", |
| "authors": [ |
| { |
| "first": "Anoop", |
| "middle": [], |
| "last": "Aroor", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Susan", |
| "suffix": "" |
| }, |
| { |
| "first": "Raj", |
| "middle": [], |
| "last": "Epstein", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Korpan", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of AAAI Fall Symposium on Artificial Intelligence for Human-Robot Interaction", |
| "volume": "", |
| "issue": "", |
| "pages": "123--125", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anoop Aroor, Susan L Epstein, and Raj Korpan. 2017. MengeROS: A Crowd Simulation Tool for Autonomous Robot Navigation. In Proceedings of AAAI Fall Symposium on Artificial Intelligence for Human-Robot Interaction, pages 123-125. AAAI.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Driving Under the Influence (of Language)", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [ |
| "Paul" |
| ], |
| "last": "Barrett", |
| "suffix": "" |
| }, |
| { |
| "first": "Scott", |
| "middle": [ |
| "Alan" |
| ], |
| "last": "Bronikowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Haonan", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [ |
| "Mark" |
| ], |
| "last": "Siskind", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "IEEE Transactions on Neural Networks and Learning Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "1--16", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/TNNLS.2017.2693278" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel Paul Barrett, Scott Alan Bronikowski, Haonan Yu, and Jeffrey Mark Siskind. 2017. Driving Under the Influence (of Language). IEEE Transactions on Neural Networks and Learning Systems, pages 1-16.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Plan explanations as model reconciliation-an empirical study", |
| "authors": [ |
| { |
| "first": "Tathagata", |
| "middle": [], |
| "last": "Chakraborti", |
| "suffix": "" |
| }, |
| { |
| "first": "Sarath", |
| "middle": [], |
| "last": "Sreedharan", |
| "suffix": "" |
| }, |
| { |
| "first": "Sachin", |
| "middle": [], |
| "last": "Grover", |
| "suffix": "" |
| }, |
| { |
| "first": "Subbarao", |
| "middle": [], |
| "last": "Kambhampati", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "14th ACM/IEEE International Conference on Human-Robot Interaction (HRI)", |
| "volume": "", |
| "issue": "", |
| "pages": "258--266", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/HRI.2019.8673193" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tathagata Chakraborti, Sarath Sreedharan, Sachin Grover, and Subbarao Kambhampati. 2019. Plan explanations as model reconciliation-an empirical study. In 2019 14th ACM/IEEE International Con- ference on Human-Robot Interaction (HRI), pages 258-266. IEEE.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Active and passive spatial learning in human navigation: Acquisition of survey knowledge", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Elizabeth", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Chrastil", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "William", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Warren", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Journal of experimental psychology: learning, memory, and cognition", |
| "volume": "39", |
| "issue": "5", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1037/a0032382" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elizabeth R Chrastil and William H Warren. 2013. Ac- tive and passive spatial learning in human naviga- tion: Acquisition of survey knowledge. Journal of experimental psychology: learning, memory, and cognition, 39(5):1520.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "From cognitive maps to cognitive graphs", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Elizabeth", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Chrastil", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "William", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Warren", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "PloS one", |
| "volume": "9", |
| "issue": "11", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1371/journal.pone.0112544" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elizabeth R Chrastil and William H Warren. 2014. From cognitive maps to cognitive graphs. PloS one, 9(11):e112544.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "A Computer Readability Formula Designed for Machine Scoring", |
| "authors": [ |
| { |
| "first": "Meri", |
| "middle": [], |
| "last": "Coleman", |
| "suffix": "" |
| }, |
| { |
| "first": "Ta", |
| "middle": [ |
| "Lin" |
| ], |
| "last": "Liau", |
| "suffix": "" |
| } |
| ], |
| "year": 1975, |
| "venue": "Journal of Applied Psychology", |
| "volume": "60", |
| "issue": "2", |
| "pages": "283--284", |
| "other_ids": { |
| "DOI": [ |
| "10.1037/h0076540" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Meri Coleman and Ta Lin Liau. 1975. A Computer Readability Formula Designed for Machine Scoring. Journal of Applied Psychology, 60(2):283-284.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Learning spatial models for navigation", |
| "authors": [ |
| { |
| "first": "Anoop", |
| "middle": [], |
| "last": "Susan L Epstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Aroor", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Evanusa", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "International Conference on Spatial Information Theory", |
| "volume": "", |
| "issue": "", |
| "pages": "403--425", |
| "other_ids": { |
| "DOI": [ |
| "10.1007/978-3-319-23374-1_19" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Susan L Epstein, Anoop Aroor, Matthew Evanusa, Eliz- abeth I Sklar, and Simon Parsons. 2015. Learning spatial models for navigation. In International Con- ference on Spatial Information Theory, pages 403- 425. Springer.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Planning and explanations with a learned spatial model", |
| "authors": [ |
| { |
| "first": "Susan", |
| "middle": [ |
| "L" |
| ], |
| "last": "Epstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Raj", |
| "middle": [], |
| "last": "Korpan", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "International Conference on Spatial Information Theory", |
| "volume": "142", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.4230/LIPIcs.COSIT.2019.22" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Susan L. Epstein and Raj Korpan. 2019. Planning and explanations with a learned spatial model. In Inter- national Conference on Spatial Information Theory, volume 142 of LIPICS, pages 22:1-22:20. Schloss Dagstuhl.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Spatial perception and action. In Handbook of spatial cognition", |
| "authors": [ |
| { |
| "first": "Flip", |
| "middle": [], |
| "last": "Brett R Fajen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Phillips", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1037/13936-004" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Brett R Fajen and Flip Phillips. 2013. Spatial percep- tion and action. In Handbook of spatial cognition. American Psychological Association.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Do humans integrate routes into a cognitive map? map-versus landmark-based navigation of novel shortcuts", |
| "authors": [ |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Foo", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "William", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Warren", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [ |
| "J" |
| ], |
| "last": "Duchon", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Tarr", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Journal of Experimental Psychology: Learning, Memory, and Cognition", |
| "volume": "31", |
| "issue": "2", |
| "pages": "195--215", |
| "other_ids": { |
| "DOI": [ |
| "10.1037/0278-7393.31.2.195" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Patrick Foo, William H Warren, Andrew Duchon, and Michael J Tarr. 2005. Do humans integrate routes into a cognitive map? map-versus landmark-based navigation of novel shortcuts. Journal of Experimen- tal Psychology: Learning, Memory, and Cognition, 31(2):195-215.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Explainable planning", |
| "authors": [ |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Fox", |
| "suffix": "" |
| }, |
| { |
| "first": "Derek", |
| "middle": [], |
| "last": "Long", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniele", |
| "middle": [], |
| "last": "Magazzeni", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "IJCAI-17 Workshop on Explainable AI (XAI)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maria Fox, Derek Long, and Daniele Magazzeni. 2017. Explainable planning. In IJCAI-17 Workshop on Ex- plainable AI (XAI), page 24.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "The organization of learning", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Charles R Gallistel", |
| "suffix": "" |
| } |
| ], |
| "year": 1990, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Charles R Gallistel. 1990. The organization of learn- ing. The MIT Press.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "The theory of affordances. Perceiving, Acting, and Knowing: Toward an Ecological Psychology", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "James", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Gibson", |
| "suffix": "" |
| } |
| ], |
| "year": 1977, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "67--82", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James J Gibson. 1977. The theory of affordances. Per- ceiving, Acting, and Knowing: Toward an Ecologi- cal Psychology, pages 67-82.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Wayfinding behavior: Cognitive mapping and other spatial processes", |
| "authors": [ |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Reginald", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Golledge", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "5--45", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Reginald G Golledge. 1999. Human wayfinding and cognitive maps. Wayfinding behavior: Cognitive mapping and other spatial processes, pages 5-45.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "How explainable plans can make planning faster", |
| "authors": [ |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Grea", |
| "suffix": "" |
| }, |
| { |
| "first": "La\u00ebtitia", |
| "middle": [], |
| "last": "Matignon", |
| "suffix": "" |
| }, |
| { |
| "first": "Samir", |
| "middle": [], |
| "last": "Aknine", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Workshop on Explainable Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "58--64", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Antoine Grea, La\u00ebtitia Matignon, and Samir Aknine. 2018. How explainable plans can make planning faster. In Workshop on Explainable Artificial Intelli- gence, pages 58-64.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "A formal basis for the heuristic determination of minimum cost paths", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [ |
| "E" |
| ], |
| "last": "Hart", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [ |
| "J" |
| ], |
| "last": "Nilsson", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Raphael", |
| "suffix": "" |
| } |
| ], |
| "year": 1968, |
| "venue": "IEEE Transactions on Systems Science and Cybernetics", |
| "volume": "4", |
| "issue": "2", |
| "pages": "100--107", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/TSSC.1968.300136" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "P. E. Hart, N. J. Nilsson, and B. Raphael. 1968. A formal basis for the heuristic determination of mini- mum cost paths. IEEE Transactions on Systems Sci- ence and Cybernetics, 4(2):100-107.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Explainable AI planning (XAIP): overview and the case of contrastive explanation", |
| "authors": [ |
| { |
| "first": "J\u00f6rg", |
| "middle": [], |
| "last": "Hoffmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniele", |
| "middle": [], |
| "last": "Magazzeni", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Reasoning Web. Explainable Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "277--282", |
| "other_ids": { |
| "DOI": [ |
| "10.1007/978-3-030-31423-1_9" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "J\u00f6rg Hoffmann and Daniele Magazzeni. 2019. Explain- able AI planning (XAIP): overview and the case of contrastive explanation. Reasoning Web. Explain- able Artificial Intelligence, pages 277-282.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Adaptivity of wayfinding strategies in a multi-building ensemble: The effects of spatial structure, task requirements, and metric information", |
| "authors": [ |
| { |
| "first": "Christoph", |
| "middle": [], |
| "last": "H\u00f6lscher", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Simon", |
| "suffix": "" |
| }, |
| { |
| "first": "Tobias", |
| "middle": [], |
| "last": "B\u00fcchner", |
| "suffix": "" |
| }, |
| { |
| "first": "Gerhard", |
| "middle": [], |
| "last": "Meilinger", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Strube", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Journal of Environmental Psychology", |
| "volume": "29", |
| "issue": "2", |
| "pages": "208--219", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.jenvp.2008.05.010" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christoph H\u00f6lscher, Simon J B\u00fcchner, Tobias Meilinger, and Gerhard Strube. 2009. Adaptivity of wayfinding strategies in a multi-building ensemble: The effects of spatial structure, task requirements, and metric information. Journal of Environmental Psychology, 29(2):208-219.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Enabling robots to communicate their objectives", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Sandy", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Pieter", |
| "middle": [], |
| "last": "Held", |
| "suffix": "" |
| }, |
| { |
| "first": "Anca D", |
| "middle": [], |
| "last": "Abbeel", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Dragan", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Autonomous Robots", |
| "volume": "43", |
| "issue": "2", |
| "pages": "309--326", |
| "other_ids": { |
| "DOI": [ |
| "10.1007/s10514-018-9771-0" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sandy H Huang, David Held, Pieter Abbeel, and Anca D Dragan. 2019. Enabling robots to com- municate their objectives. Autonomous Robots, 43(2):309-326.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Semantic mapping for mobile robotics tasks: A survey", |
| "authors": [ |
| { |
| "first": "Ioannis", |
| "middle": [], |
| "last": "Kostavelis", |
| "suffix": "" |
| }, |
| { |
| "first": "Antonios", |
| "middle": [], |
| "last": "Gasteratos", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Robotics and Autonomous Systems", |
| "volume": "66", |
| "issue": "", |
| "pages": "86--103", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.robot.2014.12.006" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ioannis Kostavelis and Antonios Gasteratos. 2015. Se- mantic mapping for mobile robotics tasks: A survey. Robotics and Autonomous Systems, 66:86-103.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Model-based contrastive explanations for explainable planning", |
| "authors": [ |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Krarup", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Cashmore", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniele", |
| "middle": [], |
| "last": "Magazzeni", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Miller", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "ICAPS 2019 Workshop on Explainable AI Planning (XAIP)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Benjamin Krarup, Michael Cashmore, Daniele Mag- azzeni, and Tim Miller. 2019. Model-based con- trastive explanations for explainable planning. In ICAPS 2019 Workshop on Explainable AI Planning (XAIP).", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "The spatial semantic hierarchy", |
| "authors": [ |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Kuipers", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Artificial intelligence", |
| "volume": "119", |
| "issue": "1-2", |
| "pages": "191--233", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/S0004-3702(00)00017-5" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Benjamin Kuipers. 2000. The spatial semantic hierar- chy. Artificial intelligence, 119(1-2):191-233.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "A Review of Spatial Reasoning and Interaction for Real-World Robotics", |
| "authors": [ |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Landsiedel", |
| "suffix": "" |
| }, |
| { |
| "first": "Verena", |
| "middle": [], |
| "last": "Rieser", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Walter", |
| "suffix": "" |
| }, |
| { |
| "first": "Dirk", |
| "middle": [], |
| "last": "Wollherr", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advanced Robotics", |
| "volume": "31", |
"issue": "5",
| "pages": "222--242", |
| "other_ids": { |
| "DOI": [ |
| "10.1080/01691864.2016.1277554" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christian Landsiedel, Verena Rieser, Matthew Walter, and Dirk Wollherr. 2017. A Review of Spatial Rea- soning and Interaction for Real-World Robotics. Ad- vanced Robotics, 31(5):222-242.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Web planner: A tool to develop classical planning domains and visualize heuristic state-space search", |
| "authors": [ |
| { |
| "first": "Ramon", |
| "middle": [ |
| "Fraga" |
| ], |
| "last": "Maur\u00edcio Cec\u00edlio Magnaguagno", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Pereira", |
| "suffix": "" |
| }, |
| { |
| "first": "Felipe", |
| "middle": [ |
| "Rech" |
| ], |
| "last": "Duarte M\u00f3re", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Meneguzzi", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Workshop on User Interfaces and Scheduling and Planning", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maur\u00edcio Cec\u00edlio Magnaguagno, Ramon Fraga Pereira, Martin Duarte M\u00f3re, and Felipe Rech Meneguzzi. 2017. Web planner: A tool to develop classical planning domains and visualize heuristic state-space search. In 2017 Workshop on User Interfaces and Scheduling and Planning.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "How People Explain Behavior: A New Theoretical Framework. Personality and Social", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Bertram", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Malle", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
"venue": "Personality and Social Psychology Review",
| "volume": "3", |
| "issue": "1", |
| "pages": "23--48", |
| "other_ids": { |
| "DOI": [ |
| "10.1207/s15327957pspr0301_2" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bertram F Malle. 1999. How People Explain Behavior: A New Theoretical Framework. Personality and So- cial Psychology Review, 3(1):23-48.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Spoken language interaction with robots: Research issues and recommendations", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Marge", |
| "suffix": "" |
| }, |
| { |
| "first": "Carol", |
| "middle": [], |
| "last": "Espy-Wilson", |
| "suffix": "" |
| }, |
| { |
| "first": "Nigel", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew Marge, Carol Espy-Wilson, and Nigel Ward. 2020. Spoken language interaction with robots: Re- search issues and recommendations. Report from the NSF Future Directions Workshop.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Hedging and Discourse: Approaches to the Analysis of a Pragmatic Phenomenon in Academic Texts", |
| "authors": [ |
| { |
| "first": "Raija", |
| "middle": [], |
| "last": "Markkanen", |
| "suffix": "" |
| }, |
| { |
| "first": "Hartmut", |
| "middle": [], |
| "last": "Schr\u00f6der", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1515/9783110807332" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Raija Markkanen and Hartmut Schr\u00f6der. 1997. Hedg- ing and Discourse: Approaches to the Analysis of a Pragmatic Phenomenon in Academic Texts, vol- ume 24. Walter de Gruyter.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Explanation in artificial intelligence: Insights from the social sciences. Artificial intelligence", |
| "authors": [ |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Miller", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
"venue": "Artificial Intelligence",
| "volume": "267", |
| "issue": "", |
| "pages": "1--38", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.artint.2018.07.007" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tim Miller. 2019. Explanation in artificial intelligence: Insights from the social sciences. Artificial intelli- gence, 267:1-38.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Explainable planning using answer set programming", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Van Nguyen", |
| "suffix": "" |
| }, |
| { |
| "first": "Loukas", |
| "middle": [], |
| "last": "Stylianos", |
| "suffix": "" |
| }, |
| { |
| "first": "Tran", |
| "middle": [], |
| "last": "Vasileiou", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [], |
| "last": "Cao Son", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Yeoh", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the International Conference on Principles of Knowledge Representation and Reasoning", |
| "volume": "17", |
| "issue": "", |
| "pages": "662--666", |
| "other_ids": { |
| "DOI": [ |
| "10.24963/kr.2020/66" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Van Nguyen, Stylianos Loukas Vasileiou, Tran Cao Son, and William Yeoh. 2020. Explainable plan- ning using answer set programming. In Proceed- ings of the International Conference on Principles of Knowledge Representation and Reasoning, vol- ume 17, pages 662-666.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Where do you think you're going? characterizing spatial mental models from planned routes", |
| "authors": [ |
| { |
| "first": "Arthur", |
| "middle": [ |
| "W" |
| ], |
| "last": "Brandon S Perelman", |
| "suffix": "" |
| }, |
| { |
| "first": "Iii", |
| "middle": [], |
| "last": "Evans", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristin", |
| "middle": [ |
| "E" |
| ], |
| "last": "Schaefer", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "ACM Transactions on Human-Robot Interaction (THRI)", |
| "volume": "9", |
| "issue": "4", |
| "pages": "1--55", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3385008" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Brandon S Perelman, Arthur W Evans III, and Kristin E Schaefer. 2020. Where do you think you're going? characterizing spatial mental models from planned routes. ACM Transactions on Human-Robot Inter- action (THRI), 9(4):1-55.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Dynamic Generation and Refinement of Robot Verbalization", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Vittorio Perera", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Sai", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephanie", |
| "middle": [], |
| "last": "Selveraj", |
| "suffix": "" |
| }, |
| { |
| "first": "Manuela", |
| "middle": [], |
| "last": "Rosenthal", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Veloso", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "25th IEEE International Symposium on Robot and Human Interactive Communication (RO-MAN)", |
| "volume": "", |
| "issue": "", |
| "pages": "212--218", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ROMAN.2016.7745133" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vittorio Perera, Sai P Selveraj, Stephanie Rosenthal, and Manuela Veloso. 2016. Dynamic Generation and Refinement of Robot Verbalization. In 25th IEEE International Symposium on Robot and Hu- man Interactive Communication (RO-MAN), pages 212-218. IEEE.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "What is the model in model-based planning?", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Pouncy", |
| "suffix": "" |
| }, |
| { |
| "first": "Pedro", |
| "middle": [], |
| "last": "Tsividis", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [ |
| "J" |
| ], |
| "last": "Gershman", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Cognitive Science", |
| "volume": "45", |
| "issue": "1", |
"pages": "e12928",
| "other_ids": { |
| "DOI": [ |
| "10.1111/cogs.12928" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Pouncy, Pedro Tsividis, and Samuel J Gersh- man. 2021. What is the model in model-based plan- ning? Cognitive Science, 45(1):e12928.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "ROS: an open-source Robot Operating System", |
| "authors": [ |
| { |
| "first": "Morgan", |
| "middle": [], |
| "last": "Quigley", |
| "suffix": "" |
| }, |
| { |
| "first": "Ken", |
| "middle": [], |
| "last": "Conley", |
| "suffix": "" |
| }, |
| { |
| "first": "Brian", |
| "middle": [ |
| "P" |
| ], |
| "last": "Gerkey", |
| "suffix": "" |
| }, |
| { |
| "first": "Josh", |
| "middle": [], |
| "last": "Faust", |
| "suffix": "" |
| }, |
| { |
| "first": "Tully", |
| "middle": [], |
| "last": "Foote", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeremy", |
| "middle": [], |
| "last": "Leibs", |
| "suffix": "" |
| }, |
| { |
| "first": "Rob", |
| "middle": [], |
| "last": "Wheeler", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "ICRA Workshop on Open Source Software", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Morgan Quigley, Ken Conley, Brian P. Gerkey, Josh Faust, Tully Foote, Jeremy Leibs, Rob Wheeler, and Andrew Y. Ng. 2009. ROS: an open-source Robot Operating System. In ICRA Workshop on Open Source Software.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Rmm: A recursive mental model for dialog navigation", |
| "authors": [ |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Homero Roman Roman", |
| "suffix": "" |
| }, |
| { |
| "first": "Jesse", |
| "middle": [], |
| "last": "Bisk", |
| "suffix": "" |
| }, |
| { |
| "first": "Asli", |
| "middle": [], |
| "last": "Thomason", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Celikyilmaz", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings", |
| "volume": "", |
| "issue": "", |
| "pages": "1732--1745", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.findings-emnlp.157" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Homero Roman Roman, Yonatan Bisk, Jesse Thoma- son, Asli Celikyilmaz, and Jianfeng Gao. 2020. Rmm: A recursive mental model for dialog navi- gation. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings, pages 1732-1745.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Explainability in human-agent systems", |
| "authors": [ |
| { |
| "first": "Avi", |
| "middle": [], |
| "last": "Rosenfeld", |
| "suffix": "" |
| }, |
| { |
| "first": "Ariella", |
| "middle": [], |
| "last": "Richardson", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Autonomous Agents and Multi-Agent Systems", |
| "volume": "33", |
| "issue": "6", |
| "pages": "673--705", |
| "other_ids": { |
| "DOI": [ |
| "10.1007/s10458-019-09408-y" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Avi Rosenfeld and Ariella Richardson. 2019. Ex- plainability in human-agent systems. Autonomous Agents and Multi-Agent Systems, 33(6):673-705.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Verbalization: Narration of Autonomous Mobile Robot Experience", |
| "authors": [ |
| { |
| "first": "Stephanie", |
| "middle": [], |
| "last": "Rosenthal", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Sai", |
| "suffix": "" |
| }, |
| { |
| "first": "Manuela", |
| "middle": [], |
| "last": "Selvaraj", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Veloso", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the International Joint Conference on Artificial Intelligence", |
| "volume": "16", |
| "issue": "", |
| "pages": "862--868", |
| "other_ids": { |
| "DOI": [ |
| "https://dl.acm.org/doi/abs/10.5555/3060621.3060741" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stephanie Rosenthal, Sai P Selvaraj, and Manuela Veloso. 2016. Verbalization: Narration of Au- tonomous Mobile Robot Experience. In Proceed- ings of the International Joint Conference on Arti- ficial Intelligence, volume 16, pages 862-868.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Natural Language Explanations in Human-Collaborative Systems", |
| "authors": [ |
| { |
| "first": "Rosario", |
| "middle": [], |
| "last": "Scalise", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephanie", |
| "middle": [], |
| "last": "Rosenthal", |
| "suffix": "" |
| }, |
| { |
| "first": "Siddhartha", |
| "middle": [], |
| "last": "Srinivasa", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 ACM/IEEE International Conference on Human-Robot Interaction", |
| "volume": "", |
| "issue": "", |
| "pages": "377--378", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3029798.3034809" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rosario Scalise, Stephanie Rosenthal, and Siddhartha Srinivasa. 2017. Natural Language Explanations in Human-Collaborative Systems. In Proceedings of the 2017 ACM/IEEE International Conference on Human-Robot Interaction, pages 377-378. ACM.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Making hybrid plans more clear to human users-a formal approach for generating sound explanations", |
| "authors": [ |
| { |
| "first": "Bastian", |
| "middle": [], |
| "last": "Seegebarth", |
| "suffix": "" |
| }, |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "M\u00fcller", |
| "suffix": "" |
| }, |
| { |
| "first": "Bernd", |
| "middle": [], |
| "last": "Schattenberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Susanne", |
| "middle": [], |
| "last": "Biundo", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the International Conference on Automated Planning and Scheduling", |
| "volume": "22", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "https://dl.acm.org/doi/abs/10.5555/3038546.3038573" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bastian Seegebarth, Felix M\u00fcller, Bernd Schattenberg, and Susanne Biundo. 2012. Making hybrid plans more clear to human users-a formal approach for generating sound explanations. In Proceedings of the International Conference on Automated Plan- ning and Scheduling, volume 22.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Verbal explanations by collaborating robot teams. Paladyn", |
| "authors": [ |
| { |
| "first": "Avinash Kumar", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Neha", |
| "middle": [], |
| "last": "Baranwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai-Florian", |
| "middle": [], |
| "last": "Richter", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Hellstr\u00f6m", |
| "suffix": "" |
| }, |
| { |
| "first": "Suna", |
| "middle": [], |
| "last": "Bensch", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
"venue": "Paladyn, Journal of Behavioral Robotics",
| "volume": "12", |
| "issue": "1", |
| "pages": "47--57", |
| "other_ids": { |
| "DOI": [ |
| "10.1515/pjbr-2021-0001" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Avinash Kumar Singh, Neha Baranwal, Kai-Florian Richter, Thomas Hellstr\u00f6m, and Suna Bensch. 2021. Verbal explanations by collaborating robot teams. Paladyn, Journal of Behavioral Robotics, 12(1):47- 57.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Tradeoff-focused contrastive explanation for MDP planning", |
| "authors": [ |
| { |
| "first": "Roykrong", |
| "middle": [], |
| "last": "Sukkerd", |
| "suffix": "" |
| }, |
| { |
| "first": "Reid", |
| "middle": [], |
| "last": "Simmons", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Garlan", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "2020 29th IEEE International Conference on Robot and Human Interactive Communication", |
| "volume": "", |
| "issue": "", |
| "pages": "1041--1048", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/RO-MAN47096.2020.9223614" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roykrong Sukkerd, Reid Simmons, and David Garlan. 2020. Tradeoff-focused contrastive explanation for MDP planning. In 2020 29th IEEE International Conference on Robot and Human Interactive Com- munication, pages 1041-1048. IEEE.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Cognitive maps, cognitive collages, and spatial mental models", |
| "authors": [ |
| { |
| "first": "Barbara", |
| "middle": [], |
| "last": "Tversky", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "European Conference on Spatial Information Theory", |
| "volume": "", |
| "issue": "", |
| "pages": "14--24", |
| "other_ids": { |
| "DOI": [ |
| "10.1007/3-540-57207-4_2" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Barbara Tversky. 1993. Cognitive maps, cognitive col- lages, and spatial mental models. In European Con- ference on Spatial Information Theory, pages 14-24. Springer.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Wormholes in virtual space: From cognitive maps to cognitive graphs", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "William", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Warren", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Daniel B Rothman", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Benjamin", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [ |
| "D" |
| ], |
| "last": "Schnapp", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ericson", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Cognition", |
| "volume": "166", |
| "issue": "", |
| "pages": "152--163", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.cognition.2017.05.020" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "William H Warren, Daniel B Rothman, Benjamin H Schnapp, and Jonathan D Ericson. 2017. Worm- holes in virtual space: From cognitive maps to cog- nitive graphs. Cognition, 166:152-163.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF1": { |
| "text": "Affordances in a simple artificial world Korpan, 2019). At decision point", |
| "uris": null, |
| "type_str": "figure", |
| "num": null |
| }, |
| "FIGREF2": { |
| "text": "Figure 1(d)shows some acquired minor-diagonal hallways.", |
| "uris": null, |
| "type_str": "figure", |
| "num": null |
| }, |
| "TABREF0": { |
| "text": "SemaFORR's planners and their objectives", |
| "html": null, |
| "content": "<table><tr><td>Planner</td><td>Objective</td></tr><tr><td>FASTP</td><td>Minimize distance traveled</td></tr><tr><td>SAFEP</td><td>Avoid obstacles</td></tr><tr><td>EXPLOREP</td><td>Avoid paths</td></tr><tr><td>NOVELP</td><td>Avoid spatial model</td></tr><tr><td>CONVEYP</td><td>Exploit conveyors</td></tr><tr><td colspan=\"2\">HALLWAYP Exploit hallways</td></tr><tr><td>REGIONP</td><td>Exploit regions, doors, exits</td></tr><tr><td>TRAILP</td><td>Exploit trail markers</td></tr><tr><td colspan=\"2\">imposed on the world's footprint. Conveyors tally</td></tr><tr><td colspan=\"2\">how often trails pass through them. Higher-count</td></tr><tr><td colspan=\"2\">cells represent locations that frequently support</td></tr><tr><td colspan=\"2\">travel. They appear darker in</td></tr></table>", |
| "type_str": "table", |
| "num": null |
| }, |
| "TABREF1": { |
| "text": "Language for the planners' objectives. N * (\u03b2) and N (\u03b2) values for FASTP and EXPLOREP are as shown. For the others, N * (\u03b2) \u2248 N (\u03b2), where N * (\u03b2) begins with \"better at\" and N (\u03b2) begins with \"worse at.\"", |
| "html": null, |
| "content": "<table><tr><td>Planner</td><td>N (\u03b2)</td><td>N * (\u03b2) N (\u03b2)</td></tr><tr><td>FASTP</td><td>short</td><td>shorter longer</td></tr><tr><td colspan=\"2\">EXPLOREP goes a new way</td><td>newer familiar</td></tr><tr><td>SAFEP</td><td>stays far from obstacles</td><td>staying far from obstacles</td></tr><tr><td>NOVELP</td><td>learns something new</td><td>learning something new</td></tr><tr><td>CONVEYP</td><td colspan=\"2\">goes through well-traveled areas going through well-traveled areas</td></tr><tr><td colspan=\"2\">HALLWAYP follows hallways</td><td>following hallways</td></tr><tr><td>REGIONP</td><td>goes through open areas</td><td>going through open areas</td></tr><tr><td>TRAILP</td><td colspan=\"2\">follows ways we've gone before following ways we've gone before</td></tr></table>", |
| "type_str": "table", |
| "num": null |
| }, |
| "TABREF2": { |
| "text": "Language for value intervals for the difference D. For affordance-based planners a=150 and b=25, for SAFEP a=0.35 and b=0.15, for EXPLOREP a=100 and b=15, and for NOVELP a=350 and b=100.", |
| "html": null, |
| "content": "<table/>", |
| "type_str": "table", |
| "num": null |
| }, |
| "TABREF3": { |
| "text": "How often planners won the vote", |
| "html": null, |
| "content": "<table><tr><td>Planner</td><td>M5</td><td>H10</td><td>G5</td><td>Total</td></tr><tr><td>FASTP</td><td colspan=\"4\">25.0% 42.9% 32.4% 33.4%</td></tr><tr><td>SAFEP</td><td colspan=\"4\">37.0% 25.7% 27.5% 30.1%</td></tr><tr><td colspan=\"2\">EXPLOREP 9.0%</td><td>6.9%</td><td>4.9%</td><td>6.9%</td></tr><tr><td>NOVELP</td><td>0.0%</td><td>0.0%</td><td>0.0%</td><td>0.0%</td></tr><tr><td colspan=\"2\">CONVEYP 14.0%</td><td colspan=\"3\">7.4% 16.5% 12.6%</td></tr><tr><td colspan=\"2\">HALLWAYP 6.0%</td><td>9.1%</td><td>6.6%</td><td>7.2%</td></tr><tr><td>REGIONP</td><td>5.5%</td><td>6.3%</td><td>0.5%</td><td>4.1%</td></tr><tr><td>TRAILP</td><td>3.5%</td><td colspan=\"2\">1.7% 11.5%</td><td>5.6%</td></tr></table>", |
| "type_str": "table", |
| "num": null |
| }, |
| "TABREF4": { |
| "text": "Analysis of explanation results with number of unique phrasings and average readability scores", |
| "html": null, |
| "content": "<table><tr><td>Unique phrasings</td><td colspan=\"4\">M5 H10 G5 All</td></tr><tr><td>Why this way?</td><td>38</td><td>30</td><td>39</td><td>49</td></tr><tr><td>How sure are you?</td><td>24</td><td>19</td><td>26</td><td>30</td></tr><tr><td>Another way?</td><td>24</td><td>19</td><td>26</td><td>30</td></tr><tr><td>Why yours?</td><td>17</td><td>15</td><td>16</td><td>18</td></tr><tr><td>How to get there?</td><td colspan=\"4\">199 175 182 556</td></tr><tr><td colspan=\"5\">Average readability M5 H10 G5 All</td></tr><tr><td>Why this way?</td><td>4.7</td><td colspan=\"3\">5.3 5.3 5.1</td></tr><tr><td>How sure are you?</td><td>6.6</td><td colspan=\"3\">6.6 6.7 6.7</td></tr><tr><td>Another way?</td><td>3.8</td><td colspan=\"3\">2.7 3.5 3.3</td></tr><tr><td>Why yours?</td><td>6.8</td><td colspan=\"3\">7.0 7.2 7.0</td></tr><tr><td>How to get there?</td><td>7.7</td><td colspan=\"3\">7.8 7.8 7.8</td></tr></table>", |
| "type_str": "table", |
| "num": null |
| } |
| } |
| } |
| } |